From 9d2bb1e43a322878f4383a77d5b974319870ef46 Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Tue, 25 Feb 2025 17:53:23 +0000 Subject: [PATCH 01/16] Add http file boolean cli argument Co-authored-by: Jakub Rybak --- httpie/cli/definition.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/httpie/cli/definition.py b/httpie/cli/definition.py index 843b29c9..444b4a75 100644 --- a/httpie/cli/definition.py +++ b/httpie/cli/definition.py @@ -218,6 +218,18 @@ content_types.add_argument( """, ) +content_types.add_argument( + "--http-file", + action="store_true", + default=False, + short_help="Parse and send an HTTP request from a .http file", + help=""" + Parse and send an HTTP request from a file in .http format. + The file should contain a valid HTTP request with headers and body. + If this is specified, URL will be treated as a file path. + """, +) + ####################################################################### # Content processing. ####################################################################### From d272be9ba476dfffcb543eed29b84f7b2d1f4ecd Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Tue, 25 Feb 2025 17:54:05 +0000 Subject: [PATCH 02/16] Disable processing of url if http file flag Co-authored-by: Jakub Rybak --- httpie/cli/argparser.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/httpie/cli/argparser.py b/httpie/cli/argparser.py index 9bf09b3b..7d333ee6 100644 --- a/httpie/cli/argparser.py +++ b/httpie/cli/argparser.py @@ -203,6 +203,8 @@ class HTTPieArgumentParser(BaseHTTPieArgumentParser): } def _process_url(self): + if self.args.http_file: + return if self.args.url.startswith('://'): # Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev` self.args.url = self.args.url[3:] From db677049c68e925fb819e20d158f0f313ba0f466 Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Tue, 25 Feb 2025 17:54:26 +0000 Subject: [PATCH 03/16] Add print of url when specifying http file flag --- httpie/core.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/httpie/core.py b/httpie/core.py index d0c26dcb..6140b0ef 100644 --- a/httpie/core.py +++ b/httpie/core.py @@ -204,6 +204,10 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus: args.follow = True # --download implies --follow. 
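For reference, the usage these first patches aim at, per the flag's help text: with `--http-file`, the URL argument is treated as a path to a `.http` file, e.g. `http --http-file requests.http`. A hypothetical `requests.http` of the kind the later patches teach the parser to read:

```
###
# @name get-example
GET https://pie.dev/get
Accept: application/json

###
POST https://pie.dev/post
Content-Type: application/json

{"hello": "world"}
```

The `###` separators, `# @name` comments, and `{{variable}}` substitution are introduced incrementally in patches 06 through 11.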
downloader = Downloader(env, output_file=args.output_file, resume=args.download_resume) downloader.pre_request(args.headers) + + if args.http_file: + print("################# Reading from HTTP file:", args.url) + messages = collect_messages(env, args=args, request_body_read_callback=request_body_read_callback) force_separator = False From e8c423b4ec4363257bfa4611b52004a9c0305594 Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Tue, 25 Feb 2025 18:05:19 +0000 Subject: [PATCH 04/16] Create placeholder http parser file for collect_messages Co-authored-by: Jakub Rybak --- httpie/http_parser.py | 136 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 httpie/http_parser.py diff --git a/httpie/http_parser.py b/httpie/http_parser.py new file mode 100644 index 00000000..ff213d1f --- /dev/null +++ b/httpie/http_parser.py @@ -0,0 +1,136 @@ +import argparse +from time import monotonic +from typing import Callable, Iterable + +import requests +# noinspection PyPackageRequirements +import urllib3 + +from httpie.client import ( + build_requests_session, + dump_request, + ensure_path_as_is, + make_request_kwargs, make_send_kwargs, + make_send_kwargs_mergeable_from_env, + max_headers, + transform_headers +) +from . import __version__ +from .cli.constants import HTTP_OPTIONS +from .context import Environment +from .encoding import UTF8 +from .models import RequestsMessage +from .sessions import get_httpie_session +from .uploads import ( + compress_request, +) +from .utils import get_expired_cookies + + +urllib3.disable_warnings() + +FORM_CONTENT_TYPE = f'application/x-www-form-urlencoded; charset={UTF8}' +JSON_CONTENT_TYPE = 'application/json' +JSON_ACCEPT = f'{JSON_CONTENT_TYPE}, */*;q=0.5' +DEFAULT_UA = f'HTTPie/{__version__}' + +IGNORE_CONTENT_LENGTH_METHODS = frozenset([HTTP_OPTIONS]) + +def collect_messages( + env: Environment, + args: argparse.Namespace, + request_body_read_callback: Callable[[bytes], None] = None, +) -> Iterable[RequestsMessage]: + httpie_session = None + httpie_session_headers = None + if args.session or args.session_read_only: + httpie_session = get_httpie_session( + env=env, + config_dir=env.config.directory, + session_name=args.session or args.session_read_only, + host=args.headers.get('Host'), + url=args.url, + ) + httpie_session_headers = httpie_session.headers + + request_kwargs = make_request_kwargs( + env, + args=args, + base_headers=httpie_session_headers, + request_body_read_callback=request_body_read_callback + ) + send_kwargs = make_send_kwargs(args) + send_kwargs_mergeable_from_env = make_send_kwargs_mergeable_from_env(args) + requests_session = build_requests_session( + ssl_version=args.ssl_version, + ciphers=args.ciphers, + verify=bool(send_kwargs_mergeable_from_env['verify']) + ) + + if httpie_session: + httpie_session.update_headers(request_kwargs['headers']) + requests_session.cookies = httpie_session.cookies + if args.auth_plugin: + # Save auth from CLI to HTTPie session. + httpie_session.auth = { + 'type': args.auth_plugin.auth_type, + 'raw_auth': args.auth_plugin.raw_auth, + } + elif httpie_session.auth: + # Apply auth from HTTPie session + request_kwargs['auth'] = httpie_session.auth + + if args.debug: + # TODO: reflect the split between request and send kwargs. 
+ dump_request(request_kwargs) + + request = requests.Request(**request_kwargs) + prepared_request = requests_session.prepare_request(request) + transform_headers(request, prepared_request) + if args.path_as_is: + prepared_request.url = ensure_path_as_is( + orig_url=args.url, + prepped_url=prepared_request.url, + ) + if args.compress and prepared_request.body: + compress_request( + request=prepared_request, + always=args.compress > 1, + ) + response_count = 0 + expired_cookies = [] + while prepared_request: + yield prepared_request + if not args.offline: + send_kwargs_merged = requests_session.merge_environment_settings( + url=prepared_request.url, + **send_kwargs_mergeable_from_env, + ) + with max_headers(args.max_headers): + response = requests_session.send( + request=prepared_request, + **send_kwargs_merged, + **send_kwargs, + ) + response._httpie_headers_parsed_at = monotonic() + expired_cookies += get_expired_cookies( + response.headers.get('Set-Cookie', '') + ) + + response_count += 1 + if response.next: + if args.max_redirects and response_count == args.max_redirects: + raise requests.TooManyRedirects + if args.follow: + prepared_request = response.next + if args.all: + yield response + continue + yield response + break + + if httpie_session: + if httpie_session.is_new() or not args.session_read_only: + httpie_session.cookies = requests_session.cookies + httpie_session.remove_cookies(expired_cookies) + httpie_session.save() From c37e5aefe6ce70f9c0c9a8bdeed40f8245a7677a Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Wed, 26 Feb 2025 14:55:19 +0000 Subject: [PATCH 05/16] Add comment to specify why skipping process_url Co-authored-by: Jakub Rybak --- httpie/cli/argparser.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/httpie/cli/argparser.py b/httpie/cli/argparser.py index 7d333ee6..e4dc7baf 100644 --- a/httpie/cli/argparser.py +++ b/httpie/cli/argparser.py @@ -204,6 +204,8 @@ class HTTPieArgumentParser(BaseHTTPieArgumentParser): def _process_url(self): if self.args.http_file: + # do not add default scheme + # treat URL as a filename if --http-file is specified return if self.args.url.startswith('://'): # Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev` From 51e61eb565f6dca25a2a83cda20224db8c29ef6c Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Wed, 26 Feb 2025 15:04:00 +0000 Subject: [PATCH 06/16] Add basic file parsing and loop for http file Co-authored-by: Jakub Rybak --- httpie/core.py | 200 +++++++++++++++++++++++------------------- httpie/http_parser.py | 158 ++++++--------------------------- 2 files changed, 136 insertions(+), 222 deletions(-) diff --git a/httpie/core.py b/httpie/core.py index 6140b0ef..e76ef9ba 100644 --- a/httpie/core.py +++ b/httpie/core.py @@ -15,6 +15,7 @@ from .cli.nested_json import NestedJSONSyntaxError from .client import collect_messages from .context import Environment, LogLevel from .downloads import Downloader +from .http_parser import http_parser from .models import ( RequestsMessageKind, OutputOptions @@ -172,103 +173,118 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus: The main program without error handling. """ - # TODO: Refactor and drastically simplify, especially so that the separator logic is elsewhere. 
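In outline, the core.py rewrite below wraps the previous body of `program()` in a nested `actual_program()` helper and, when `--http-file` is set, replays that helper once per request parsed from the file. An abridged sketch of the resulting shape (header and body wiring are still commented out at this stage):

```python
def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
    def actual_program(args, env) -> ExitStatus:
        ...  # the previous body of program(), unchanged

    if args.http_file:
        returns = []
        for req in http_parser(args.url):   # parse the .http file into requests
            args.url = req.url              # rewrite the parsed args per request
            args.method = req.method
            returns.append(actual_program(args, env))
        return (ExitStatus.SUCCESS
                if all(r is ExitStatus.SUCCESS for r in returns)
                else ExitStatus.ERROR)

    return actual_program(args, env)
```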
- exit_status = ExitStatus.SUCCESS - downloader = None - initial_request: Optional[requests.PreparedRequest] = None - final_response: Optional[requests.Response] = None - processing_options = ProcessingOptions.from_raw_args(args) - def separate(): - getattr(env.stdout, 'buffer', env.stdout).write(MESSAGE_SEPARATOR_BYTES) + def actual_program(args: argparse.Namespace, env: Environment) -> ExitStatus: + # TODO: Refactor and drastically simplify, especially so that the separator logic is elsewhere. + exit_status = ExitStatus.SUCCESS + downloader = None + initial_request: Optional[requests.PreparedRequest] = None + final_response: Optional[requests.Response] = None + processing_options = ProcessingOptions.from_raw_args(args) - def request_body_read_callback(chunk: bytes): - should_pipe_to_stdout = bool( - # Request body output desired - OUT_REQ_BODY in args.output_options - # & not `.read()` already pre-request (e.g., for compression) - and initial_request - # & non-EOF chunk - and chunk - ) - if should_pipe_to_stdout: - return write_raw_data( - env, - chunk, - processing_options=processing_options, - headers=initial_request.headers + def separate(): + getattr(env.stdout, 'buffer', env.stdout).write(MESSAGE_SEPARATOR_BYTES) + + def request_body_read_callback(chunk: bytes): + should_pipe_to_stdout = bool( + # Request body output desired + OUT_REQ_BODY in args.output_options + # & not `.read()` already pre-request (e.g., for compression) + and initial_request + # & non-EOF chunk + and chunk ) - - try: - if args.download: - args.follow = True # --download implies --follow. - downloader = Downloader(env, output_file=args.output_file, resume=args.download_resume) - downloader.pre_request(args.headers) - - if args.http_file: - print("################# Reading from HTTP file:", args.url) - - messages = collect_messages(env, args=args, - request_body_read_callback=request_body_read_callback) - force_separator = False - prev_with_body = False - - # Process messages as they’re generated - for message in messages: - output_options = OutputOptions.from_message(message, args.output_options) - - do_write_body = output_options.body - if prev_with_body and output_options.any() and (force_separator or not env.stdout_isatty): - # Separate after a previous message with body, if needed. See test_tokens.py. - separate() - force_separator = False - if output_options.kind is RequestsMessageKind.REQUEST: - if not initial_request: - initial_request = message - if output_options.body: - is_streamed_upload = not isinstance(message.body, (str, bytes)) - do_write_body = not is_streamed_upload - force_separator = is_streamed_upload and env.stdout_isatty - else: - final_response = message - if args.check_status or downloader: - exit_status = http_status_to_exit_status(http_status=message.status_code, follow=args.follow) - if exit_status != ExitStatus.SUCCESS and (not env.stdout_isatty or args.quiet == 1): - env.log_error(f'HTTP {message.raw.status} {message.raw.reason}', level=LogLevel.WARNING) - write_message( - requests_message=message, - env=env, - output_options=output_options._replace( - body=do_write_body - ), - processing_options=processing_options - ) - prev_with_body = output_options.body - - # Cleanup - if force_separator: - separate() - if downloader and exit_status == ExitStatus.SUCCESS: - # Last response body download. 
- download_stream, download_to = downloader.start( - initial_url=initial_request.url, - final_response=final_response, - ) - write_stream(stream=download_stream, outfile=download_to, flush=False) - downloader.finish() - if downloader.interrupted: - exit_status = ExitStatus.ERROR - env.log_error( - f'Incomplete download: size={downloader.status.total_size};' - f' downloaded={downloader.status.downloaded}' + if should_pipe_to_stdout: + return write_raw_data( + env, + chunk, + processing_options=processing_options, + headers=initial_request.headers ) - return exit_status - finally: - if downloader and not downloader.finished: - downloader.failed() - if args.output_file and args.output_file_specified: - args.output_file.close() + try: + if args.download: + args.follow = True # --download implies --follow. + downloader = Downloader(env, output_file=args.output_file, resume=args.download_resume) + downloader.pre_request(args.headers) + + + messages = collect_messages(env, args=args, + request_body_read_callback=request_body_read_callback) + force_separator = False + prev_with_body = False + + # Process messages as they’re generated + for message in messages: + output_options = OutputOptions.from_message(message, args.output_options) + + do_write_body = output_options.body + if prev_with_body and output_options.any() and (force_separator or not env.stdout_isatty): + # Separate after a previous message with body, if needed. See test_tokens.py. + separate() + force_separator = False + if output_options.kind is RequestsMessageKind.REQUEST: + if not initial_request: + initial_request = message + if output_options.body: + is_streamed_upload = not isinstance(message.body, (str, bytes)) + do_write_body = not is_streamed_upload + force_separator = is_streamed_upload and env.stdout_isatty + else: + final_response = message + if args.check_status or downloader: + exit_status = http_status_to_exit_status(http_status=message.status_code, follow=args.follow) + if exit_status != ExitStatus.SUCCESS and (not env.stdout_isatty or args.quiet == 1): + env.log_error(f'HTTP {message.raw.status} {message.raw.reason}', level=LogLevel.WARNING) + write_message( + requests_message=message, + env=env, + output_options=output_options._replace( + body=do_write_body + ), + processing_options=processing_options + ) + prev_with_body = output_options.body + + # Cleanup + if force_separator: + separate() + if downloader and exit_status == ExitStatus.SUCCESS: + # Last response body download. 
+ download_stream, download_to = downloader.start( + initial_url=initial_request.url, + final_response=final_response, + ) + write_stream(stream=download_stream, outfile=download_to, flush=False) + downloader.finish() + if downloader.interrupted: + exit_status = ExitStatus.ERROR + env.log_error( + f'Incomplete download: size={downloader.status.total_size};' + f' downloaded={downloader.status.downloaded}' + ) + return exit_status + + finally: + if downloader and not downloader.finished: + downloader.failed() + if args.output_file and args.output_file_specified: + args.output_file.close() + + if args.http_file: + # TODO: FILE PARSING TO REQUESTS ARRAY + requests_list = http_parser(args.url) + returns = [] + for req in requests_list: + args.url = req.url + args.method = req.method + # args.headers = req.headers + # args.body = req.body + returns.append(actual_program(args, env)) + + return ExitStatus.SUCCESS if all(r is ExitStatus.SUCCESS for r in returns) else ExitStatus.ERROR + + return actual_program(args, env) def print_debug_info(env: Environment): diff --git a/httpie/http_parser.py b/httpie/http_parser.py index ff213d1f..91e029bb 100644 --- a/httpie/http_parser.py +++ b/httpie/http_parser.py @@ -1,136 +1,34 @@ -import argparse -from time import monotonic -from typing import Callable, Iterable - -import requests -# noinspection PyPackageRequirements -import urllib3 - -from httpie.client import ( - build_requests_session, - dump_request, - ensure_path_as_is, - make_request_kwargs, make_send_kwargs, - make_send_kwargs_mergeable_from_env, - max_headers, - transform_headers -) -from . import __version__ -from .cli.constants import HTTP_OPTIONS -from .context import Environment -from .encoding import UTF8 -from .models import RequestsMessage -from .sessions import get_httpie_session -from .uploads import ( - compress_request, -) -from .utils import get_expired_cookies +from dataclasses import dataclass +from pathlib import Path -urllib3.disable_warnings() +@dataclass +class HttpFileRequest: + method: str + url: str + headers: dict + body: bytes -FORM_CONTENT_TYPE = f'application/x-www-form-urlencoded; charset={UTF8}' -JSON_CONTENT_TYPE = 'application/json' -JSON_ACCEPT = f'{JSON_CONTENT_TYPE}, */*;q=0.5' -DEFAULT_UA = f'HTTPie/{__version__}' -IGNORE_CONTENT_LENGTH_METHODS = frozenset([HTTP_OPTIONS]) +def http_parser(filename: str) -> list[HttpFileRequest]: + http_file = Path(filename) + if not http_file.exists(): + raise FileNotFoundError(f"File not found: {filename}") + if not http_file.is_file(): + raise IsADirectoryError(f"Path is not a file: {filename}") + http_contents = http_file.read_text() + http_lines = [ + line for line in http_contents.splitlines() if not line.startswith("#") + ] + http_lines = [line for line in http_lines if line.strip()] + first_line = http_lines[0] + method, url = first_line.split(" ") -def collect_messages( - env: Environment, - args: argparse.Namespace, - request_body_read_callback: Callable[[bytes], None] = None, -) -> Iterable[RequestsMessage]: - httpie_session = None - httpie_session_headers = None - if args.session or args.session_read_only: - httpie_session = get_httpie_session( - env=env, - config_dir=env.config.directory, - session_name=args.session or args.session_read_only, - host=args.headers.get('Host'), - url=args.url, + return [ + HttpFileRequest( + method=method, + url=url, + headers={}, + body=b"", ) - httpie_session_headers = httpie_session.headers - - request_kwargs = make_request_kwargs( - env, - args=args, - 
base_headers=httpie_session_headers, - request_body_read_callback=request_body_read_callback - ) - send_kwargs = make_send_kwargs(args) - send_kwargs_mergeable_from_env = make_send_kwargs_mergeable_from_env(args) - requests_session = build_requests_session( - ssl_version=args.ssl_version, - ciphers=args.ciphers, - verify=bool(send_kwargs_mergeable_from_env['verify']) - ) - - if httpie_session: - httpie_session.update_headers(request_kwargs['headers']) - requests_session.cookies = httpie_session.cookies - if args.auth_plugin: - # Save auth from CLI to HTTPie session. - httpie_session.auth = { - 'type': args.auth_plugin.auth_type, - 'raw_auth': args.auth_plugin.raw_auth, - } - elif httpie_session.auth: - # Apply auth from HTTPie session - request_kwargs['auth'] = httpie_session.auth - - if args.debug: - # TODO: reflect the split between request and send kwargs. - dump_request(request_kwargs) - - request = requests.Request(**request_kwargs) - prepared_request = requests_session.prepare_request(request) - transform_headers(request, prepared_request) - if args.path_as_is: - prepared_request.url = ensure_path_as_is( - orig_url=args.url, - prepped_url=prepared_request.url, - ) - if args.compress and prepared_request.body: - compress_request( - request=prepared_request, - always=args.compress > 1, - ) - response_count = 0 - expired_cookies = [] - while prepared_request: - yield prepared_request - if not args.offline: - send_kwargs_merged = requests_session.merge_environment_settings( - url=prepared_request.url, - **send_kwargs_mergeable_from_env, - ) - with max_headers(args.max_headers): - response = requests_session.send( - request=prepared_request, - **send_kwargs_merged, - **send_kwargs, - ) - response._httpie_headers_parsed_at = monotonic() - expired_cookies += get_expired_cookies( - response.headers.get('Set-Cookie', '') - ) - - response_count += 1 - if response.next: - if args.max_redirects and response_count == args.max_redirects: - raise requests.TooManyRedirects - if args.follow: - prepared_request = response.next - if args.all: - yield response - continue - yield response - break - - if httpie_session: - if httpie_session.is_new() or not args.session_read_only: - httpie_session.cookies = requests_session.cookies - httpie_session.remove_cookies(expired_cookies) - httpie_session.save() + ] From 4c7513dbc04882638519a41ea4cd775ff87b01b2 Mon Sep 17 00:00:00 2001 From: Coli Alessandro Date: Thu, 27 Feb 2025 15:50:53 +0100 Subject: [PATCH 07/16] [dev] added parser API (#8) --- httpie/http_parser.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/httpie/http_parser.py b/httpie/http_parser.py index 91e029bb..c16ba9a0 100644 --- a/httpie/http_parser.py +++ b/httpie/http_parser.py @@ -11,6 +11,35 @@ class HttpFileRequest: def http_parser(filename: str) -> list[HttpFileRequest]: + + def extract_headers(raw_text: str) -> dict : + ''' + Extract the headers of the .http file + + Args: + raw_text: the lines of the .http file containing the headers + + Returns: + dict: containing the parsed headers + ''' + return None + + def parse_body(raw_text: str) -> dict : + ''' + parse the body of the .http file + ''' + return None + + def parse_single_request(raw_text: str) -> HttpFileRequest: + '''Parse a single request from .http file format to HttpFileRequest ''' + + return HttpFileRequest( + method=method, + url=url, + headers={}, + body=b"", + ) + http_file = Path(filename) if not http_file.exists(): raise FileNotFoundError(f"File not found: {filename}") From 
3b640e537a3ce6e175b4c91065551ce8e0debb59 Mon Sep 17 00:00:00 2001 From: Coli Alessandro Date: Thu, 27 Feb 2025 16:11:12 +0100 Subject: [PATCH 08/16] [dev] implements parse_single_request (#1,#3,#6) split the .http file into blocks gets method and URL from requests cleans comments fix API's --- httpie/http_parser.py | 51 ++++++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/httpie/http_parser.py b/httpie/http_parser.py index c16ba9a0..d6154145 100644 --- a/httpie/http_parser.py +++ b/httpie/http_parser.py @@ -12,7 +12,7 @@ class HttpFileRequest: def http_parser(filename: str) -> list[HttpFileRequest]: - def extract_headers(raw_text: str) -> dict : + def extract_headers(raw_text: list[str]) -> dict : ''' Extract the headers of the .http file @@ -22,22 +22,40 @@ def http_parser(filename: str) -> list[HttpFileRequest]: Returns: dict: containing the parsed headers ''' - return None + return {} - def parse_body(raw_text: str) -> dict : + def parse_body(raw_text: str) -> bytes : ''' parse the body of the .http file ''' - return None + return b"" def parse_single_request(raw_text: str) -> HttpFileRequest: '''Parse a single request from .http file format to HttpFileRequest ''' + lines = raw_text.strip().splitlines() + + lines = [line.strip() for line in lines if not line.strip().startswith("#")] + + method, url = lines[0].split(" ") + + raw_headers = [] + raw_body = [] + is_body = False + + for line in lines[1:]: + if not line.strip(): + is_body = True + continue + if not is_body: + raw_headers.append(line) + else: + raw_body.append(line) return HttpFileRequest( method=method, url=url, - headers={}, - body=b"", + headers=extract_headers(raw_headers), + body=parse_body("\n".join(raw_body)), ) http_file = Path(filename) @@ -46,18 +64,11 @@ def http_parser(filename: str) -> list[HttpFileRequest]: if not http_file.is_file(): raise IsADirectoryError(f"Path is not a file: {filename}") http_contents = http_file.read_text() - http_lines = [ - line for line in http_contents.splitlines() if not line.startswith("#") - ] - http_lines = [line for line in http_lines if line.strip()] - first_line = http_lines[0] - method, url = first_line.split(" ") + + raw_requests = http_contents.split("###") + parsed_requests = [] + + for raw_req in raw_requests: + parsed_requests.append(parse_single_request(raw_req)) - return [ - HttpFileRequest( - method=method, - url=url, - headers={}, - body=b"", - ) - ] + return parsed_requests From e984c574a0c00b757c029b92977dd90772061a3c Mon Sep 17 00:00:00 2001 From: RuriThomas <116952548+RuriThomas@users.noreply.github.com> Date: Thu, 27 Feb 2025 17:57:57 +0100 Subject: [PATCH 09/16] [dev] added multiple functions: to split the file into single requests (#6), to search for dependencies (#9), to extract the name of a request (#10), to replace the global values (#4). 
Also added some attributes to the Request class (#7) --- httpie/http_parser.py | 45 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/httpie/http_parser.py b/httpie/http_parser.py index d6154145..8d32c6b5 100644 --- a/httpie/http_parser.py +++ b/httpie/http_parser.py @@ -1,16 +1,57 @@ from dataclasses import dataclass from pathlib import Path +import re +from __future__ import annotations @dataclass class HttpFileRequest: method: str url: str - headers: dict - body: bytes + headers: dict | None + body: bytes | None + dependencies: list[HttpFileRequest] | None + name: str | None def http_parser(filename: str) -> list[HttpFileRequest]: + + def split_requests(http_file_contents:str) -> list[str]: + """makes a dictionnary from the raw http file that breaks it down into individual requests and returns a dictionary of their names """ + return re.split(r"^###", http_file_contents, re.MULTILINE) + + def get_dependencies(raw_http_request:str, poss_names: list[str]) -> list[str] | None: + """returns a list of all the names of the requests that must be fufilled before this one can be sent""" + pattern = r"\{\{(.*?)\}\}" + matches = re.findall(pattern, raw_http_request) + if len(matches) == 0: + return None + names = [re.findall(r"^([A-Za-z0-9_]+).", match, re.MULTILINE) for match in matches] + flat_names = [match for sublist in names for match in sublist] + if not all(name in poss_names for name in flat_names): + # TODO error not all dependencies exist + return None + return flat_names + + def get_name(raw_http_request:str) -> str | None: + """returns the name of the http request if it has one, None otherwise""" + matches = re.findall(r"^((//)|(#)) @name (.+)", raw_http_request, re.MULTILINE) + if len(matches) == 0: + return None + elif len(matches) == 1: + return matches[0] + else: + # TODO error too many names + return None + + def replace_global(http_file_contents_raw:str) -> str: + """finds and replaces all global variables by their values""" + # possible error when @variable=value is in the body + matches = re.findall(r"^@([A-Za-z0-9_]+)=(.+)$", http_file_contents_raw, re.MULTILINE) + http_file_contents_cooking = http_file_contents_raw + for variableName, value in matches: + http_file_contents_cooking = re.sub(rf"{{{{({re.escape(variableName)})}}}}",value , http_file_contents_cooking) + return http_file_contents_cooking def extract_headers(raw_text: list[str]) -> dict : ''' From 7af719e0c5aba314b4ce7c8a074635e45968042f Mon Sep 17 00:00:00 2001 From: Coli Alessandro Date: Sun, 2 Mar 2025 17:37:17 +0100 Subject: [PATCH 10/16] [dev] get header function (#2) also brings other function in the logic --- httpie/http_parser.py | 45 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/httpie/http_parser.py b/httpie/http_parser.py index 8d32c6b5..da8ed694 100644 --- a/httpie/http_parser.py +++ b/httpie/http_parser.py @@ -1,8 +1,7 @@ +from __future__ import annotations from dataclasses import dataclass from pathlib import Path import re -from __future__ import annotations - @dataclass class HttpFileRequest: @@ -16,9 +15,18 @@ class HttpFileRequest: def http_parser(filename: str) -> list[HttpFileRequest]: - def split_requests(http_file_contents:str) -> list[str]: - """makes a dictionnary from the raw http file that breaks it down into individual requests and returns a dictionary of their names """ - return re.split(r"^###", http_file_contents, re.MULTILINE) + def split_requests(http_file_contents: 
str) -> list[str]: + """Splits an HTTP file into individual requests but keeps the '###' in each request.""" + parts = re.split(r"(^###.*)", http_file_contents, flags=re.MULTILINE) + requests = [] + + for i in range(1, len(parts), 2): + header = parts[i].strip() + body = parts[i + 1].strip() if i + 1 < len(parts) else "" + requests.append(f"{header}\n{body}") + + return requests + def get_dependencies(raw_http_request:str, poss_names: list[str]) -> list[str] | None: """returns a list of all the names of the requests that must be fufilled before this one can be sent""" @@ -63,7 +71,17 @@ def http_parser(filename: str) -> list[HttpFileRequest]: Returns: dict: containing the parsed headers ''' - return {} + headers = {} + + for line in raw_text: + if not line.strip() or ':' not in line: + continue + + header_name, header_value = line.split(':', 1) + + headers[header_name.strip()] = header_value.strip() + + return headers def parse_body(raw_text: str) -> bytes : ''' @@ -97,6 +115,8 @@ def http_parser(filename: str) -> list[HttpFileRequest]: url=url, headers=extract_headers(raw_headers), body=parse_body("\n".join(raw_body)), + dependencies={}, + name=get_name(raw_text) ) http_file = Path(filename) @@ -106,10 +126,17 @@ def http_parser(filename: str) -> list[HttpFileRequest]: raise IsADirectoryError(f"Path is not a file: {filename}") http_contents = http_file.read_text() - raw_requests = http_contents.split("###") + raw_requests = split_requests(replace_global(http_contents)) + raw_requests = [req.strip() for req in raw_requests if req.strip()] parsed_requests = [] - + req_names = [] + for raw_req in raw_requests: - parsed_requests.append(parse_single_request(raw_req)) + new_req = parse_single_request(raw_req) + new_req.dependencies = get_dependencies(raw_req,req_names) + if(new_req.name != None): + req_names.append(new_req.name) + + parsed_requests.append(new_req) return parsed_requests From b4327fae07790f4f2488da00d01c35f922dd3e65 Mon Sep 17 00:00:00 2001 From: Coli Alessandro Date: Wed, 5 Mar 2025 14:40:36 +0100 Subject: [PATCH 11/16] [refactor] moved loop to program and http-parser functions to be visible from outside the file --- httpie/core.py | 51 +++++++--- httpie/http_parser.py | 217 +++++++++++++++++++----------------------- 2 files changed, 134 insertions(+), 134 deletions(-) diff --git a/httpie/core.py b/httpie/core.py index e76ef9ba..09e768fb 100644 --- a/httpie/core.py +++ b/httpie/core.py @@ -1,6 +1,8 @@ import argparse import os import platform +import random +import string import sys import socket from typing import List, Optional, Union, Callable @@ -15,7 +17,7 @@ from .cli.nested_json import NestedJSONSyntaxError from .client import collect_messages from .context import Environment, LogLevel from .downloads import Downloader -from .http_parser import http_parser +from .http_parser import * from .models import ( RequestsMessageKind, OutputOptions @@ -27,7 +29,7 @@ from .status import ExitStatus, http_status_to_exit_status from .utils import unwrap_context from .internal.update_warnings import check_updates from .internal.daemon_runner import is_daemon_mode, run_daemon_task - +from pathlib import Path # noinspection PyDefaultArgument def raw_main( @@ -272,17 +274,40 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus: args.output_file.close() if args.http_file: - # TODO: FILE PARSING TO REQUESTS ARRAY - requests_list = http_parser(args.url) - returns = [] - for req in requests_list: - args.url = req.url - args.method = req.method - # args.headers = 
req.headers - # args.body = req.body - returns.append(actual_program(args, env)) - - return ExitStatus.SUCCESS if all(r is ExitStatus.SUCCESS for r in returns) else ExitStatus.ERROR + + http_file = Path(args.url) + if not http_file.exists(): + raise FileNotFoundError(f"File not found: {args.url}") + if not http_file.is_file(): + raise IsADirectoryError(f"Path is not a file: {args.url}") + http_contents = http_file.read_text() + + raw_requests = split_requests(replace_global(http_contents)) + raw_requests = [req.strip() for req in raw_requests if req.strip()] + parsed_requests = [] + req_names = [] + responses = {} + + for raw_req in raw_requests: + new_req = parse_single_request(raw_req) + new_req.dependencies = get_dependencies(raw_req, req_names) + if new_req.name is not None: + req_names.append(new_req.name) + else: + letters = string.ascii_letters + string.digits + new_req.name = ''.join(random.choice(letters) for _ in range(16)) + parsed_requests.append(new_req) + args.url = new_req.url + args.method = new_req.method + args.headers = new_req.headers + args.body = new_req.body + + response = actual_program(args, env) + if new_req.name is not None: + responses[new_req.name] = response + + all_success = all(r is ExitStatus.SUCCESS for r in responses.values()) + return ExitStatus.SUCCESS if all_success else ExitStatus.ERROR return actual_program(args, env) diff --git a/httpie/http_parser.py b/httpie/http_parser.py index da8ed694..287c23a7 100644 --- a/httpie/http_parser.py +++ b/httpie/http_parser.py @@ -1,6 +1,5 @@ from __future__ import annotations from dataclasses import dataclass -from pathlib import Path import re @dataclass @@ -13,130 +12,106 @@ class HttpFileRequest: name: str | None -def http_parser(filename: str) -> list[HttpFileRequest]: +def split_requests(http_file_contents: str) -> list[str]: + """Splits an HTTP file into individual requests but keeps the '###' in each request.""" + parts = re.split(r"(^###.*)", http_file_contents, flags=re.MULTILINE) + requests = [] - def split_requests(http_file_contents: str) -> list[str]: - """Splits an HTTP file into individual requests but keeps the '###' in each request.""" - parts = re.split(r"(^###.*)", http_file_contents, flags=re.MULTILINE) - requests = [] + for i in range(1, len(parts), 2): + header = parts[i].strip() + body = parts[i + 1].strip() if i + 1 < len(parts) else "" + requests.append(f"{header}\n{body}") - for i in range(1, len(parts), 2): - header = parts[i].strip() - body = parts[i + 1].strip() if i + 1 < len(parts) else "" - requests.append(f"{header}\n{body}") - - return requests + return requests - def get_dependencies(raw_http_request:str, poss_names: list[str]) -> list[str] | None: - """returns a list of all the names of the requests that must be fufilled before this one can be sent""" - pattern = r"\{\{(.*?)\}\}" - matches = re.findall(pattern, raw_http_request) - if len(matches) == 0: - return None - names = [re.findall(r"^([A-Za-z0-9_]+).", match, re.MULTILINE) for match in matches] - flat_names = [match for sublist in names for match in sublist] - if not all(name in poss_names for name in flat_names): - # TODO error not all dependencies exist - return None - return flat_names +def get_dependencies(raw_http_request:str, poss_names: list[str]) -> list[str] | None: + """returns a list of all the names of the requests that must be fufilled before this one can be sent""" + pattern = r"\{\{(.*?)\}\}" + matches = re.findall(pattern, raw_http_request) + if len(matches) == 0: + return None + names = 
[re.findall(r"^([A-Za-z0-9_]+).", match, re.MULTILINE) for match in matches] + flat_names = [match for sublist in names for match in sublist] + if not all(name in poss_names for name in flat_names): + # TODO error not all dependencies exist + return None + return flat_names + +def get_name(raw_http_request:str) -> str | None: + """returns the name of the http request if it has one, None otherwise""" + matches = re.findall(r"^((//)|(#)) @name (.+)", raw_http_request, re.MULTILINE) + if len(matches) == 0: + return None + elif len(matches) == 1: + return matches[0] + else: + # TODO error too many names + return None + +def replace_global(http_file_contents_raw:str) -> str: + """finds and replaces all global variables by their values""" + # possible error when @variable=value is in the body + matches = re.findall(r"^@([A-Za-z0-9_]+)=(.+)$", http_file_contents_raw, re.MULTILINE) + http_file_contents_cooking = http_file_contents_raw + for variableName, value in matches: + http_file_contents_cooking = re.sub(rf"{{{{({re.escape(variableName)})}}}}",value , http_file_contents_cooking) + return http_file_contents_cooking + +def extract_headers(raw_text: list[str]) -> dict : + ''' + Extract the headers of the .http file - def get_name(raw_http_request:str) -> str | None: - """returns the name of the http request if it has one, None otherwise""" - matches = re.findall(r"^((//)|(#)) @name (.+)", raw_http_request, re.MULTILINE) - if len(matches) == 0: - return None - elif len(matches) == 1: - return matches[0] + Args: + raw_text: the lines of the .http file containing the headers + + Returns: + dict: containing the parsed headers + ''' + headers = {} + + for line in raw_text: + if not line.strip() or ':' not in line: + continue + + header_name, header_value = line.split(':', 1) + + headers[header_name.strip()] = header_value.strip() + + return headers + +def parse_body(raw_text: str) -> bytes : + ''' + parse the body of the .http file + ''' + return b"" + +def parse_single_request(raw_text: str) -> HttpFileRequest: + '''Parse a single request from .http file format to HttpFileRequest ''' + lines = raw_text.strip().splitlines() + + lines = [line.strip() for line in lines if not line.strip().startswith("#")] + + method, url = lines[0].split(" ") + + raw_headers = [] + raw_body = [] + is_body = False + + for line in lines[1:]: + if not line.strip(): + is_body = True + continue + if not is_body: + raw_headers.append(line) else: - # TODO error too many names - return None + raw_body.append(line) - def replace_global(http_file_contents_raw:str) -> str: - """finds and replaces all global variables by their values""" - # possible error when @variable=value is in the body - matches = re.findall(r"^@([A-Za-z0-9_]+)=(.+)$", http_file_contents_raw, re.MULTILINE) - http_file_contents_cooking = http_file_contents_raw - for variableName, value in matches: - http_file_contents_cooking = re.sub(rf"{{{{({re.escape(variableName)})}}}}",value , http_file_contents_cooking) - return http_file_contents_cooking - - def extract_headers(raw_text: list[str]) -> dict : - ''' - Extract the headers of the .http file - - Args: - raw_text: the lines of the .http file containing the headers - - Returns: - dict: containing the parsed headers - ''' - headers = {} - - for line in raw_text: - if not line.strip() or ':' not in line: - continue - - header_name, header_value = line.split(':', 1) - - headers[header_name.strip()] = header_value.strip() - - return headers - - def parse_body(raw_text: str) -> bytes : - ''' - parse the body of the 
.http file - ''' - return b"" - - def parse_single_request(raw_text: str) -> HttpFileRequest: - '''Parse a single request from .http file format to HttpFileRequest ''' - lines = raw_text.strip().splitlines() - - lines = [line.strip() for line in lines if not line.strip().startswith("#")] - - method, url = lines[0].split(" ") - - raw_headers = [] - raw_body = [] - is_body = False - - for line in lines[1:]: - if not line.strip(): - is_body = True - continue - if not is_body: - raw_headers.append(line) - else: - raw_body.append(line) - - return HttpFileRequest( - method=method, - url=url, - headers=extract_headers(raw_headers), - body=parse_body("\n".join(raw_body)), - dependencies={}, - name=get_name(raw_text) - ) - - http_file = Path(filename) - if not http_file.exists(): - raise FileNotFoundError(f"File not found: {filename}") - if not http_file.is_file(): - raise IsADirectoryError(f"Path is not a file: {filename}") - http_contents = http_file.read_text() - - raw_requests = split_requests(replace_global(http_contents)) - raw_requests = [req.strip() for req in raw_requests if req.strip()] - parsed_requests = [] - req_names = [] - - for raw_req in raw_requests: - new_req = parse_single_request(raw_req) - new_req.dependencies = get_dependencies(raw_req,req_names) - if(new_req.name != None): - req_names.append(new_req.name) - - parsed_requests.append(new_req) - - return parsed_requests + return HttpFileRequest( + method=method, + url=url, + headers=extract_headers(raw_headers), + body=parse_body("\n".join(raw_body)), + dependencies={}, + name=get_name(raw_text) + ) From 4481bfb3322ff0f5831b2b0579c278b5435d0d78 Mon Sep 17 00:00:00 2001 From: Elias Floreteng <18127101+eliasfloreteng@users.noreply.github.com> Date: Thu, 6 Mar 2025 16:22:23 +0100 Subject: [PATCH 12/16] Parse and pass request body (#15) * Handle and skip empty request segments Co-authored-by: Jakub Rybak * Fix splitting of requests Co-authored-by: Jakub Rybak * Parse and pass request body Co-authored-by: Jakub Rybak * Format files Co-authored-by: Jakub Rybak * Format definition.py to follow code style Co-authored-by: Jakub Rybak --------- Co-authored-by: Jakub Rybak --- httpie/cli/definition.py | 543 ++++++++++++++++++++------------------- httpie/core.py | 154 +++++------ httpie/http_parser.py | 83 +++--- 3 files changed, 414 insertions(+), 366 deletions(-) diff --git a/httpie/cli/definition.py b/httpie/cli/definition.py index 444b4a75..78c72467 100644 --- a/httpie/cli/definition.py +++ b/httpie/cli/definition.py @@ -5,20 +5,39 @@ import textwrap from argparse import FileType from httpie import __doc__, __version__ -from httpie.cli.argtypes import (KeyValueArgType, SessionNameValidator, - SSLCredentials, readable_file_arg, - response_charset_type, response_mime_type) -from httpie.cli.constants import (BASE_OUTPUT_OPTIONS, DEFAULT_FORMAT_OPTIONS, - OUT_REQ_BODY, OUT_REQ_HEAD, OUT_RESP_BODY, - OUT_RESP_HEAD, OUT_RESP_META, OUTPUT_OPTIONS, - OUTPUT_OPTIONS_DEFAULT, PRETTY_MAP, - PRETTY_STDOUT_TTY_ONLY, - SEPARATOR_GROUP_ALL_ITEMS, SEPARATOR_PROXY, - SORTED_FORMAT_OPTIONS_STRING, - UNSORTED_FORMAT_OPTIONS_STRING, RequestType) +from httpie.cli.argtypes import ( + KeyValueArgType, + SessionNameValidator, + SSLCredentials, + readable_file_arg, + response_charset_type, + response_mime_type, +) +from httpie.cli.constants import ( + BASE_OUTPUT_OPTIONS, + DEFAULT_FORMAT_OPTIONS, + OUT_REQ_BODY, + OUT_REQ_HEAD, + OUT_RESP_BODY, + OUT_RESP_HEAD, + OUT_RESP_META, + OUTPUT_OPTIONS, + OUTPUT_OPTIONS_DEFAULT, + PRETTY_MAP, + 
PRETTY_STDOUT_TTY_ONLY, + SEPARATOR_GROUP_ALL_ITEMS, + SEPARATOR_PROXY, + SORTED_FORMAT_OPTIONS_STRING, + UNSORTED_FORMAT_OPTIONS_STRING, + RequestType, +) from httpie.cli.options import ParserSpec, Qualifiers, to_argparse -from httpie.output.formatters.colors import (AUTO_STYLE, DEFAULT_STYLE, BUNDLED_STYLES, - get_available_styles) +from httpie.output.formatters.colors import ( + AUTO_STYLE, + DEFAULT_STYLE, + BUNDLED_STYLES, + get_available_styles, +) from httpie.plugins.builtin import BuiltinAuthPlugin from httpie.plugins.registry import plugin_manager from httpie.ssl_ import AVAILABLE_SSL_VERSION_ARG_MAPPING, DEFAULT_SSL_CIPHERS_STRING @@ -26,12 +45,12 @@ from httpie.ssl_ import AVAILABLE_SSL_VERSION_ARG_MAPPING, DEFAULT_SSL_CIPHERS_S # Man pages are static (built when making a release). # We use this check to not include generated, system-specific information there (e.g., default --ciphers). -IS_MAN_PAGE = bool(os.environ.get('HTTPIE_BUILDING_MAN_PAGES')) +IS_MAN_PAGE = bool(os.environ.get("HTTPIE_BUILDING_MAN_PAGES")) options = ParserSpec( - 'http', - description=f'{__doc__.strip()} ', + "http", + description=f"{__doc__.strip()} ", epilog=""" For every --OPTION there is also a --no-OPTION that reverts OPTION to its default value. @@ -39,7 +58,7 @@ options = ParserSpec( Suggestions and bug reports are greatly appreciated: https://github.com/httpie/cli/issues """, - source_file=__file__ + source_file=__file__, ) ####################################################################### @@ -47,7 +66,7 @@ options = ParserSpec( ####################################################################### positional_arguments = options.add_group( - 'Positional arguments', + "Positional arguments", description=""" These arguments come after any flags and in the order they are listed here. Only URL is required. @@ -55,11 +74,11 @@ positional_arguments = options.add_group( ) positional_arguments.add_argument( - dest='method', - metavar='METHOD', + dest="method", + metavar="METHOD", nargs=Qualifiers.OPTIONAL, default=None, - short_help='The HTTP method to be used for the request (GET, POST, PUT, DELETE, ...).', + short_help="The HTTP method to be used for the request (GET, POST, PUT, DELETE, ...).", help=""" The HTTP method to be used for the request (GET, POST, PUT, DELETE, ...). @@ -72,9 +91,9 @@ positional_arguments.add_argument( """, ) positional_arguments.add_argument( - dest='url', - metavar='URL', - short_help='The request URL.', + dest="url", + metavar="URL", + short_help="The request URL.", help=""" The request URL. Scheme defaults to 'http://' if the URL does not include one. (You can override this with: --default-scheme=http/https) @@ -87,21 +106,29 @@ positional_arguments.add_argument( """, ) positional_arguments.add_argument( - dest='request_items', - metavar='REQUEST_ITEM', + dest="request_items", + metavar="REQUEST_ITEM", nargs=Qualifiers.ZERO_OR_MORE, default=None, type=KeyValueArgType(*SEPARATOR_GROUP_ALL_ITEMS), short_help=( - 'HTTPie’s request items syntax for specifying HTTP headers, JSON/Form' - 'data, files, and URL parameters.' + "HTTPie’s request items syntax for specifying HTTP headers, JSON/Form" + "data, files, and URL parameters." 
), nested_options=[ - ('HTTP Headers', 'Name:Value', 'Arbitrary HTTP header, e.g X-API-Token:123'), - ('URL Parameters', 'name==value', 'Querystring parameter to the URL, e.g limit==50'), - ('Data Fields', 'field=value', 'Data fields to be serialized as JSON (default) or Form Data (with --form)'), - ('Raw JSON Fields', 'field:=json', 'Data field for real JSON types.'), - ('File upload Fields', 'field@/dir/file', 'Path field for uploading a file.'), + ("HTTP Headers", "Name:Value", "Arbitrary HTTP header, e.g X-API-Token:123"), + ( + "URL Parameters", + "name==value", + "Querystring parameter to the URL, e.g limit==50", + ), + ( + "Data Fields", + "field=value", + "Data fields to be serialized as JSON (default) or Form Data (with --form)", + ), + ("Raw JSON Fields", "field:=json", "Data field for real JSON types."), + ("File upload Fields", "field@/dir/file", "Path field for uploading a file."), ], help=r""" Optional key-value pairs to be included in the request. The separator used @@ -148,15 +175,15 @@ positional_arguments.add_argument( # Content type. ####################################################################### -content_types = options.add_group('Predefined content types') +content_types = options.add_group("Predefined content types") content_types.add_argument( - '--json', - '-j', - action='store_const', + "--json", + "-j", + action="store_const", const=RequestType.JSON, - dest='request_type', - short_help='(default) Serialize data items from the command line as a JSON object.', + dest="request_type", + short_help="(default) Serialize data items from the command line as a JSON object.", help=""" (default) Data items from the command line are serialized as a JSON object. The Content-Type and Accept headers are set to application/json @@ -165,12 +192,12 @@ content_types.add_argument( """, ) content_types.add_argument( - '--form', - '-f', - action='store_const', + "--form", + "-f", + action="store_const", const=RequestType.FORM, - dest='request_type', - short_help='Serialize data items from the command line as form field data.', + dest="request_type", + short_help="Serialize data items from the command line as form field data.", help=""" Data items from the command line are serialized as form fields. @@ -181,25 +208,25 @@ content_types.add_argument( """, ) content_types.add_argument( - '--multipart', - action='store_const', + "--multipart", + action="store_const", const=RequestType.MULTIPART, - dest='request_type', + dest="request_type", short_help=( - 'Similar to --form, but always sends a multipart/form-data ' - 'request (i.e., even without files).' - ) + "Similar to --form, but always sends a multipart/form-data " + "request (i.e., even without files)." + ), ) content_types.add_argument( - '--boundary', + "--boundary", short_help=( - 'Specify a custom boundary string for multipart/form-data requests. ' - 'Only has effect only together with --form.' - ) + "Specify a custom boundary string for multipart/form-data requests. " + "Only has effect only together with --form." + ), ) content_types.add_argument( - '--raw', - short_help='Pass raw request data without extra processing.', + "--raw", + short_help="Pass raw request data without extra processing.", help=""" This option allows you to pass raw request data without extra processing (as opposed to the structured request items syntax): @@ -234,14 +261,14 @@ content_types.add_argument( # Content processing. 
####################################################################### -processing_options = options.add_group('Content processing options') +processing_options = options.add_group("Content processing options") processing_options.add_argument( - '--compress', - '-x', - action='count', + "--compress", + "-x", + action="count", default=0, - short_help='Compress the content with Deflate algorithm.', + short_help="Compress the content with Deflate algorithm.", help=""" Content compressed (encoded) with Deflate algorithm. The Content-Encoding header is set to deflate. @@ -265,9 +292,9 @@ def format_style_help(available_styles, *, isolation_mode: bool = False): {available_styles} """ if isolation_mode: - text += '\n\n' - text += 'For finding out all available styles in your system, try:\n\n' - text += ' $ http --style\n' + text += "\n\n" + text += "For finding out all available styles in your system, try:\n\n" + text += " $ http --style\n" text += textwrap.dedent(""" The "{auto_style}" style follows your terminal's ANSI color styles. For non-{auto_style} styles to work properly, please make sure that the @@ -278,9 +305,8 @@ def format_style_help(available_styles, *, isolation_mode: bool = False): if isolation_mode: available_styles = sorted(BUNDLED_STYLES) - available_styles_text = '\n'.join( - f' {line.strip()}' - for line in textwrap.wrap(', '.join(available_styles), 60) + available_styles_text = "\n".join( + f" {line.strip()}" for line in textwrap.wrap(", ".join(available_styles), 60) ).strip() return text.format( default=DEFAULT_STYLE, @@ -290,24 +316,24 @@ def format_style_help(available_styles, *, isolation_mode: bool = False): _sorted_kwargs = { - 'action': 'append_const', - 'const': SORTED_FORMAT_OPTIONS_STRING, - 'dest': 'format_options', + "action": "append_const", + "const": SORTED_FORMAT_OPTIONS_STRING, + "dest": "format_options", } _unsorted_kwargs = { - 'action': 'append_const', - 'const': UNSORTED_FORMAT_OPTIONS_STRING, - 'dest': 'format_options', + "action": "append_const", + "const": UNSORTED_FORMAT_OPTIONS_STRING, + "dest": "format_options", } -output_processing = options.add_group('Output processing') +output_processing = options.add_group("Output processing") output_processing.add_argument( - '--pretty', - dest='prettify', + "--pretty", + dest="prettify", default=PRETTY_STDOUT_TTY_ONLY, choices=sorted(PRETTY_MAP.keys()), - short_help='Control the processing of console outputs.', + short_help="Control the processing of console outputs.", help=""" Controls output processing. The value can be "none" to not prettify the output (default for redirected output), "all" to apply both colors @@ -316,12 +342,12 @@ output_processing.add_argument( """, ) output_processing.add_argument( - '--style', - '-s', - dest='style', - metavar='STYLE', + "--style", + "-s", + dest="style", + metavar="STYLE", default=DEFAULT_STYLE, - action='lazy_choices', + action="lazy_choices", getter=get_available_styles, short_help=f'Output coloring style (default is "{DEFAULT_STYLE}").', help_formatter=format_style_help, @@ -330,16 +356,16 @@ output_processing.add_argument( # The closest approx. of the documented resetting to default via --no-
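As of patch 11, the parser helpers are plain module-level functions (core.py pulls them in with `from .http_parser import *`), so they can be exercised standalone. A minimal sketch under that assumption; note that `parse_body()` is still a stub returning `b""` at this stage, and that a request line must be exactly `METHOD URL`, since a trailing `HTTP/1.1` would break the two-value unpack in `parse_single_request()`:

```python
# Standalone exercise of the parser as it stands after patch 11.
from httpie.http_parser import parse_single_request, replace_global, split_requests

SAMPLE = """\
@host=https://pie.dev

###
# @name first
GET {{host}}/get
Accept: application/json

###
POST {{host}}/post
Content-Type: application/json

{"hello": "world"}
"""

# Substitute {{host}}, split on '###', then parse each chunk,
# mirroring the sequence core.py performs for --http-file.
for raw in split_requests(replace_global(SAMPLE)):
    raw = raw.strip()
    if not raw:
        continue
    req = parse_single_request(raw)
    print(req.method, req.url, req.headers)
# GET https://pie.dev/get {'Accept': 'application/json'}
# POST https://pie.dev/post {'Content-Type': 'application/json'}
```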