Coli Alessandro 2025-03-07 13:37:04 +00:00 committed by GitHub
commit 5ddded8990
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 1229 additions and 397 deletions


@@ -203,6 +203,10 @@ class HTTPieArgumentParser(BaseHTTPieArgumentParser):
}
def _process_url(self):
if self.args.http_file:
# do not add default scheme
# treat URL as a filename if --http-file is specified
return
if self.args.url.startswith('://'):
# Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev`
self.args.url = self.args.url[3:]
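For context, a rough invocation sketch (an assumption: the option surfaced to the CLI is --http-file, as args.http_file suggests; requests.http below is a hypothetical file path):

from httpie.core import main

# With --http-file, the positional URL argument is treated as a path to a .http file
# rather than as a URL (see the early return above).
exit_status = main(args=["http", "--http-file", "requests.http"])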

File diff suppressed because it is too large


@@ -3,7 +3,7 @@ import os
import platform
import sys
import socket
from typing import List, Optional, Union, Callable
from typing import List, Optional, Union, Callable, Iterable, Dict, Tuple
import requests
from pygments import __version__ as pygments_version
@@ -12,20 +12,29 @@ from requests import __version__ as requests_version
from . import __version__ as httpie_version
from .cli.constants import OUT_REQ_BODY
from .cli.nested_json import NestedJSONSyntaxError
from .client import collect_messages
from .client import collect_messages, RequestsMessage
from .context import Environment, LogLevel
from .downloads import Downloader
from .models import (
RequestsMessageKind,
OutputOptions
from .http_parser import (
parse_single_request,
replace_global,
split_requests,
replace_dependencies
)
from .models import RequestsMessageKind, OutputOptions
from .output.models import ProcessingOptions
from .output.writer import write_message, write_stream, write_raw_data, MESSAGE_SEPARATOR_BYTES
from .output.writer import (
write_message,
write_stream,
write_raw_data,
MESSAGE_SEPARATOR_BYTES,
)
from .plugins.registry import plugin_manager
from .status import ExitStatus, http_status_to_exit_status
from .utils import unwrap_context
from .internal.update_warnings import check_updates
from .internal.daemon_runner import is_daemon_mode, run_daemon_task
from pathlib import Path
# noinspection PyDefaultArgument
@@ -48,27 +57,27 @@ def raw_main(
if use_default_options and env.config.default_options:
args = env.config.default_options + args
include_debug_info = '--debug' in args
include_traceback = include_debug_info or '--traceback' in args
include_debug_info = "--debug" in args
include_traceback = include_debug_info or "--traceback" in args
def handle_generic_error(e, annotation=None):
msg = str(e)
if hasattr(e, 'request'):
if hasattr(e, "request"):
request = e.request
if hasattr(request, 'url'):
if hasattr(request, "url"):
msg = (
f'{msg} while doing a {request.method}'
f' request to URL: {request.url}'
f"{msg} while doing a {request.method}"
f" request to URL: {request.url}"
)
if annotation:
msg += annotation
env.log_error(f'{type(e).__name__}: {msg}')
env.log_error(f"{type(e).__name__}: {msg}")
if include_traceback:
raise
if include_debug_info:
print_debug_info(env)
if args == ['--debug']:
if args == ["--debug"]:
return ExitStatus.SUCCESS
exit_status = ExitStatus.SUCCESS
@@ -84,13 +93,13 @@ def raw_main(
raise
exit_status = ExitStatus.ERROR
except KeyboardInterrupt:
env.stderr.write('\n')
env.stderr.write("\n")
if include_traceback:
raise
exit_status = ExitStatus.ERROR_CTRL_C
except SystemExit as e:
if e.code != ExitStatus.SUCCESS:
env.stderr.write('\n')
env.stderr.write("\n")
if include_traceback:
raise
exit_status = ExitStatus.ERROR
@@ -102,33 +111,32 @@ def raw_main(
env=env,
)
except KeyboardInterrupt:
env.stderr.write('\n')
env.stderr.write("\n")
if include_traceback:
raise
exit_status = ExitStatus.ERROR_CTRL_C
except SystemExit as e:
if e.code != ExitStatus.SUCCESS:
env.stderr.write('\n')
env.stderr.write("\n")
if include_traceback:
raise
exit_status = ExitStatus.ERROR
except requests.Timeout:
exit_status = ExitStatus.ERROR_TIMEOUT
env.log_error(f'Request timed out ({parsed_args.timeout}s).')
env.log_error(f"Request timed out ({parsed_args.timeout}s).")
except requests.TooManyRedirects:
exit_status = ExitStatus.ERROR_TOO_MANY_REDIRECTS
env.log_error(
f'Too many redirects'
f' (--max-redirects={parsed_args.max_redirects}).'
f"Too many redirects (--max-redirects={parsed_args.max_redirects})."
)
except requests.exceptions.ConnectionError as exc:
annotation = None
original_exc = unwrap_context(exc)
if isinstance(original_exc, socket.gaierror):
if original_exc.errno == socket.EAI_AGAIN:
annotation = '\nCouldn’t connect to a DNS server. Please check your connection and try again.'
annotation = "\nCouldn’t connect to a DNS server. Please check your connection and try again."
elif original_exc.errno == socket.EAI_NONAME:
annotation = '\nCouldn’t resolve the given hostname. Please check the URL and try again.'
annotation = "\nCouldn’t resolve the given hostname. Please check the URL and try again."
propagated_exc = original_exc
else:
propagated_exc = exc
@@ -144,8 +152,7 @@ def raw_main(
def main(
args: List[Union[str, bytes]] = sys.argv,
env: Environment = Environment()
args: List[Union[str, bytes]] = sys.argv, env: Environment = Environment()
) -> ExitStatus:
"""
The main function.
@@ -159,12 +166,7 @@ def main(
from .cli.definition import parser
return raw_main(
parser=parser,
main_program=program,
args=args,
env=env
)
return raw_main(parser=parser, main_program=program, args=args, env=env)
def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
@@ -172,6 +174,8 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
The main program without error handling.
"""
def actual_program(args: argparse.Namespace, env: Environment) -> Tuple[ExitStatus, Iterable[RequestsMessage]]:
# TODO: Refactor and drastically simplify, especially so that the separator logic is elsewhere.
exit_status = ExitStatus.SUCCESS
downloader = None
@@ -180,7 +184,7 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
processing_options = ProcessingOptions.from_raw_args(args)
def separate():
getattr(env.stdout, 'buffer', env.stdout).write(MESSAGE_SEPARATOR_BYTES)
getattr(env.stdout, "buffer", env.stdout).write(MESSAGE_SEPARATOR_BYTES)
def request_body_read_callback(chunk: bytes):
should_pipe_to_stdout = bool(
@@ -196,25 +200,35 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
env,
chunk,
processing_options=processing_options,
headers=initial_request.headers
headers=initial_request.headers,
)
try:
if args.download:
args.follow = True # --download implies --follow.
downloader = Downloader(env, output_file=args.output_file, resume=args.download_resume)
downloader = Downloader(
env, output_file=args.output_file, resume=args.download_resume
)
downloader.pre_request(args.headers)
messages = collect_messages(env, args=args,
request_body_read_callback=request_body_read_callback)
messages = collect_messages(
env, args=args, request_body_read_callback=request_body_read_callback
)
force_separator = False
prev_with_body = False
# Process messages as they’re generated
for message in messages:
output_options = OutputOptions.from_message(message, args.output_options)
output_options = OutputOptions.from_message(
message, args.output_options
)
do_write_body = output_options.body
if prev_with_body and output_options.any() and (force_separator or not env.stdout_isatty):
if (
prev_with_body
and output_options.any()
and (force_separator or not env.stdout_isatty)
):
# Separate after a previous message with body, if needed. See test_tokens.py.
separate()
force_separator = False
@@ -228,16 +242,21 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
else:
final_response = message
if args.check_status or downloader:
exit_status = http_status_to_exit_status(http_status=message.status_code, follow=args.follow)
if exit_status != ExitStatus.SUCCESS and (not env.stdout_isatty or args.quiet == 1):
env.log_error(f'HTTP {message.raw.status} {message.raw.reason}', level=LogLevel.WARNING)
exit_status = http_status_to_exit_status(
http_status=message.status_code, follow=args.follow
)
if exit_status != ExitStatus.SUCCESS and (
not env.stdout_isatty or args.quiet == 1
):
env.log_error(
f"HTTP {message.raw.status} {message.raw.reason}",
level=LogLevel.WARNING,
)
write_message(
requests_message=message,
env=env,
output_options=output_options._replace(
body=do_write_body
),
processing_options=processing_options
output_options=output_options._replace(body=do_write_body),
processing_options=processing_options,
)
prev_with_body = output_options.body
@@ -255,10 +274,10 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
if downloader.interrupted:
exit_status = ExitStatus.ERROR
env.log_error(
f'Incomplete download: size={downloader.status.total_size};'
f' downloaded={downloader.status.downloaded}'
f"Incomplete download: size={downloader.status.total_size};"
f" downloaded={downloader.status.downloaded}"
)
return exit_status
return exit_status, messages
finally:
if downloader and not downloader.finished:
@@ -266,33 +285,67 @@ def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
if args.output_file and args.output_file_specified:
args.output_file.close()
if args.http_file:
http_file = Path(args.url)
if not http_file.exists():
raise FileNotFoundError(f"File not found: {args.url}")
if not http_file.is_file():
raise IsADirectoryError(f"Path is not a file: {args.url}")
http_contents = http_file.read_text()
# Resolve global @variable definitions first, then split the file into raw requests.
raw_requests = split_requests(replace_global(http_contents))
raw_requests = [req.strip() for req in raw_requests if req.strip()]
parsed_requests = []
req_names = []
responses: Dict[str, Iterable[RequestsMessage]] = {}
exit_statuses = []
for raw_req in raw_requests:
# Substitute placeholders that refer to previously named requests before parsing.
dependency_free_req = replace_dependencies(raw_req, responses)
new_req = parse_single_request(dependency_free_req)
if new_req is None:
continue
if new_req.name is not None:
req_names.append(new_req.name)
parsed_requests.append(new_req)
args.url = new_req.url
args.method = new_req.method
args.headers = new_req.headers
args.data = new_req.body
status, response = actual_program(args, env)
exit_statuses.append(status)
if new_req.name is not None:
responses[new_req.name] = response
all_success = all(r is ExitStatus.SUCCESS for r in exit_statuses)
return ExitStatus.SUCCESS if all_success else ExitStatus.ERROR
return actual_program(args, env)[0]
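To make the branch above concrete, here is a hypothetical requests.http document of the kind this loop is meant to consume: globals are declared with @variable=value, requests are separated by '###' lines, '# @name' names a request, and {{name.response.body.$.path}}-style placeholders are later resolved by replace_dependencies. (Note that parse_body in the new http_parser.py is still a stub, so request bodies are not yet forwarded.)

EXAMPLE_HTTP_FILE = """\
@host=pie.dev

### create an item
# @name createItem
POST https://{{host}}/post
Content-Type: application/json

{"name": "Ada"}

### read back a value produced by the previous response
# assumes the pie.dev /post endpoint echoes the posted JSON under "json"
GET https://{{host}}/anything/{{createItem.response.body.$.json.name}}
"""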
def print_debug_info(env: Environment):
env.stderr.writelines([
f'HTTPie {httpie_version}\n',
f'Requests {requests_version}\n',
f'Pygments {pygments_version}\n',
f'Python {sys.version}\n{sys.executable}\n',
f'{platform.system()} {platform.release()}',
])
env.stderr.write('\n\n')
env.stderr.writelines(
[
f"HTTPie {httpie_version}\n",
f"Requests {requests_version}\n",
f"Pygments {pygments_version}\n",
f"Python {sys.version}\n{sys.executable}\n",
f"{platform.system()} {platform.release()}",
]
)
env.stderr.write("\n\n")
env.stderr.write(repr(env))
env.stderr.write('\n\n')
env.stderr.write("\n\n")
env.stderr.write(repr(plugin_manager))
env.stderr.write('\n')
env.stderr.write("\n")
def decode_raw_args(
args: List[Union[str, bytes]],
stdin_encoding: str
) -> List[str]:
def decode_raw_args(args: List[Union[str, bytes]], stdin_encoding: str) -> List[str]:
"""
Convert all bytes args to str
by decoding them using stdin encoding.
"""
return [
arg.decode(stdin_encoding)
if type(arg) is bytes else arg
for arg in args
]
return [arg.decode(stdin_encoding) if type(arg) is bytes else arg for arg in args]

httpie/http_parser.py (new file, 167 lines)

@@ -0,0 +1,167 @@
from __future__ import annotations
from dataclasses import dataclass
import re
from re import Match
from .client import RequestsMessage
from typing import Iterable, Dict, List
import json
from jsonpath_ng import parse as jsonpath_parse
from lxml import etree
@dataclass
class HttpFileRequest:
method: str
url: str
headers: Dict | None
body: bytes | None
name: str | None
def split_requests(http_file_contents: str) -> List[str]:
"""Splits an HTTP file into individual requests but keeps the '###' in each request."""
parts = re.split(r"(^###.*)", http_file_contents, flags=re.MULTILINE)
requests = []
for i in range(1, len(parts), 2):
header = parts[i].strip()
body = parts[i + 1].strip() if i + 1 < len(parts) else ""
requests.append(f"{header}\n{body}")
return requests
def replace_dependencies(raw_http_request: str, responses: Dict[str, Iterable[RequestsMessage]]) -> str | None:
"""Replaces the dependencies dependent variables in the raw request with their values"""
def replace(match: Match[str]):
"""gives the string which should replaces the one given as a parameter"""
str = match.group(0)
var = str.lstrip("{").rstrip("}")
splitter = re.match(r"(?P<name>\w+)\.(?P<type>request|response)\.(?P<section>body|headers)\.(?P<extractor>.+)", var)
if not splitter:
raise ValueError(f"Difficulties replacing {str} in {raw_http_request}")
Dict = splitter.groupdict()
req_name = Dict["name"]
req_type = Dict["type"]
section = Dict["section"]
extractor = Dict["extractor"]
if responses.get(req_name) is None:
raise ValueError(f"{req_name} is not an existing request's name")
if req_type == "request":
msg = responses[req_name][0]
elif req_type == "response":
msg: RequestsMessage = responses[req_name][1]
if section == "body":
if extractor == "*":
return msg.body # Return full body
elif extractor.startswith("$."): # JSONPath
try:
json_data = msg.json() # Convert response to JSON
jsonpath_expr = jsonpath_parse(extractor)
parsed_data = jsonpath_expr.find(json_data)
return [matched.value for matched in parsed_data] if parsed_data else None
except json.JSONDecodeError:
return None # Not a valid JSON
elif extractor.startswith("/"): # XPath
try:
xml_tree = etree.fromstring(msg.content) # Parse XML
return xml_tree.xpath(extractor)
except etree.XMLSyntaxError:
return None # Not a valid XML
elif section == "headers":
return msg.headers[extractor]
raise ValueError(f"Incoherent request {str}")
pattern = r"\{\{(.*?)\}\}"
return re.sub(pattern, replace, raw_http_request)
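A small usage sketch of the placeholder grammar implemented above. The request name, header, and URL below are made up, and only the request/headers branch is exercised; body extraction would additionally need a response object exposing .json() / .content.

# Placeholder grammar, per the regex above:
#   {{<name>.<request|response>.<body|headers>.<extractor>}}
# where <extractor> is a header name, "*" for the whole body,
# a "$."-prefixed JSONPath, or a "/"-prefixed XPath.
import requests
from httpie.http_parser import replace_dependencies

# Hypothetical earlier request named "login"; the mapping stores
# (request message, response message) pairs, indexed as [0] and [1] above.
login_request = requests.Request("POST", "https://pie.dev/post")
login_request.headers["X-Auth-Token"] = "abc123"
responses = {"login": [login_request, None]}

raw = "GET /profile\nAuthorization: {{login.request.headers.X-Auth-Token}}"
print(replace_dependencies(raw, responses))
# GET /profile
# Authorization: abc123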
def get_name(raw_http_request: str) -> str | None:
"""
Returns the name of the HTTP request if it has one, None otherwise.
The expected pattern is either a comment starting with '//' or '#' (optionally preceded by whitespace)
followed by '@name' and the name.
"""
# Allow leading whitespace before the comment marker.
matches = re.findall(r"^\s*(?://|#)\s*@name\s+(.+)$", raw_http_request, re.MULTILINE)
if len(matches) == 0:
return None
elif len(matches) == 1:
return matches[0].strip() # strip extra whitespace if any
else:
# TODO: Handle error for multiple names found. Currently returns None.
return None
def replace_global(http_file_contents_raw: str) -> str:
"""finds and replaces all global variables by their values"""
# possible error when @variable=value is in the body
matches = re.findall(r"^@([A-Za-z0-9_]+)=(.+)$", http_file_contents_raw, flags=re.MULTILINE)
http_file_contents_cooking = http_file_contents_raw
for variableName, value in matches:
http_file_contents_cooking = re.sub(
rf"{{{{({re.escape(variableName)})}}}}", value, http_file_contents_cooking
)
return http_file_contents_cooking
def extract_headers(raw_text: List[str]) -> Dict:
"""
Extract the headers of the .http file
Args:
raw_text: the lines of the .http file containing the headers
Returns:
Dict: containing the parsed headers
"""
headers = {}
for line in raw_text:
if not line.strip() or ':' not in line:
continue
header_name, header_value = line.split(':', 1)
headers[header_name.strip()] = header_value.strip()
return headers
def parse_body(raw_text: str) -> bytes:
"""
Parse the body of a .http request. Placeholder implementation: body parsing is not done yet, so this returns empty bytes.
"""
return b""
def parse_single_request(raw_text: str) -> HttpFileRequest:
"""Parse a single request in .http file format into an HttpFileRequest"""
lines = raw_text.strip().splitlines()
lines = [line.strip() for line in lines if not line.strip().startswith("#")]
# Tolerate repeated whitespace and an optional trailing HTTP version on the request line
method, url = lines[0].split()[:2]
raw_headers = []
raw_body = []
is_body = False
for line in lines[1:]:
if not line.strip():
is_body = True
continue
if not is_body:
raw_headers.append(line)
else:
raw_body.append(line)
return HttpFileRequest(
method=method,
url=url,
headers=extract_headers(raw_headers),
body=parse_body("\n".join(raw_body)),
name=get_name(raw_text)
)
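A quick end-to-end sketch of the helpers in this file, run on a small in-memory .http document (hypothetical content, no network involved):

from httpie.http_parser import replace_global, split_requests, parse_single_request

content = """@host=pie.dev

### first request
# @name getUser
GET https://{{host}}/get
Accept: application/json
"""

for raw in split_requests(replace_global(content)):
    req = parse_single_request(raw)
    print(req.name, req.method, req.url, req.headers, req.body)
# getUser GET https://pie.dev/get {'Accept': 'application/json'} b''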


@@ -58,6 +58,8 @@ install_requires =
importlib-metadata>=1.4.0; python_version<"3.8"
rich>=9.10.0
colorama>=0.2.4; sys_platform=="win32"
jsonpath_ng
lxml
python_requires = >=3.7

tests/test_http_parser.py (new file, 569 lines)

@@ -0,0 +1,569 @@
import pytest
import requests
from httpie.http_parser import (
split_requests,
replace_dependencies,
get_name,
replace_global,
extract_headers,
parse_body,
parse_single_request,
)
def normalize_whitespace(text):
"""Removes excessive newlines and spaces for consistent comparison."""
return "\n".join(line.rstrip() for line in text.splitlines()).strip()
# TESTS FOR split_requests -->> REQ_002
def test_split_requests():
# Test case: Multiple HTTP requests
http_file = """### Request 1
GET /users
### Request 2
POST /users
Content-Type: application/json
{"name": "John"}"""
expected_output = [
"### Request 1\nGET /users",
"### Request 2\nPOST /users\nContent-Type: application/json\n\n{\"name\": \"John\"}"
]
assert list(map(normalize_whitespace, split_requests(http_file))) == list(
map(normalize_whitespace, expected_output)
)
def test_split_single_request():
"""
This test ensures that a single HTTP request with a '###' header is correctly parsed
without any unexpected modifications.
"""
http_file = """### Only Request
GET /status"""
expected_output = ["### Only Request\nGET /status"]
assert list(map(normalize_whitespace, split_requests(http_file))) == list(
map(normalize_whitespace, expected_output)
)
def test_split_empty_file():
"""
This test checks if an empty input correctly returns an empty list,
ensuring there are no errors when handling empty strings.
"""
assert split_requests("") == []
def test_split_request_no_body():
"""
This test verifies that requests with no body (only headers and method)
are parsed correctly without adding unnecessary spaces or newlines.
"""
http_file = """### No Body Request
GET /ping"""
expected_output = ["### No Body Request\nGET /ping"]
assert list(map(normalize_whitespace, split_requests(http_file))) == list(
map(normalize_whitespace, expected_output)
)
def test_split_request_with_extra_newlines():
"""
This test ensures that the function correctly handles requests that
contain extra blank lines while preserving necessary formatting.
"""
http_file = """### Request 1
GET /data
### Request 2
POST /submit
{"key": "value"}
"""
expected_output = [
"### Request 1\nGET /data", # Normalized extra newline
"### Request 2\nPOST /submit\n\n{\"key\": \"value\"}" # Normalized newlines inside request
]
assert list(map(normalize_whitespace, split_requests(http_file))) == list(
map(normalize_whitespace, expected_output)
)
def test_split_request_without_header():
"""
This test ensures that requests without a '###' header are ignored and
do not cause the function to fail. The function should return an empty list
in such cases.
"""
http_file = """GET /withoutHeader"""
expected_output = [] # No '###' header means no valid requests should be returned
assert split_requests(http_file) == expected_output
# TESTS FOR get_dependencies -->> REQ_007
def test_replace_dependencies_no_placeholders():
"""
This test verifies that if a request does not contain any {{placeholders}},
the function returns it unchanged.
"""
raw_request = """GET /users"""
assert replace_dependencies(raw_request, None) == """GET /users"""
def test_replace_dependencies_invalid_dependency():
"""
This test ensures that if the request references a dependency that is
not present in the provided responses mapping, the function raises a ValueError.
"""
raw_request = """DELETE /items/{{InvalidRequest}}"""
responses = {"Request1": None, "Request2": None}
with pytest.raises(ValueError):
replace_dependencies(raw_request, responses)
def test_replace_dependencies_Req_single():
"""
This test checks that a single valid dependency placeholder is correctly
resolved from a previous request's headers and substituted into the raw request.
"""
raw_request = """GET /update/{{Request1.request.headers.id}}"""
url = "https://api.example.com"
request = requests.Request('GET', url)
response = None
responses = {"Request1": [request, response]}
request.headers["id"] = str(1)
assert replace_dependencies(raw_request, responses) == """GET /update/1"""
def test_replace_dependencies_PreReq_single():
"""
This test checks that a single valid dependency placeholder is correctly
resolved from a PreparedRequest's headers and substituted into the raw request.
"""
raw_request = """GET /update/{{Request1.request.headers.id}}"""
url = "https://api.example.com"
session = requests.Session()
request = requests.Request('GET', url)
prepared_request = session.prepare_request(request)
response = None
responses = {"Request1": [prepared_request, response]}
prepared_request.headers["id"] = str(1)
assert replace_dependencies(raw_request, responses) == """GET /update/1"""
def test_replace_multiple_dependencies():
"""
This test verifies that multiple dependencies are correctly identified
and replaced in the request.
"""
raw_request = """GET /update/{{Request1.request.headers.id}}/{{Request1.request.headers.name}}"""
url = "https://api.example.com"
request = requests.Request('GET', url)
response = None
responses = {"Request1": [request, response]}
request.headers["id"] = str(1)
request.headers["name"] = "Jack"
assert replace_dependencies(raw_request, responses) == """GET /update/1/Jack"""
def test_replace_dependencies_empty_request():
"""
This test checks that an empty request string is returned unchanged
since there are no placeholders.
"""
raw_request = ""
assert replace_dependencies(raw_request, None) == ""
# TESTS FOR get_name --> REQ_003
def test_get_name_with_hash_comment():
"""
Ensures that get_name correctly extracts a request name
when defined with '#' as a comment.
"""
raw_request = """# @name Request1
GET /users"""
expected_output = "Request1"
assert get_name(raw_request) == expected_output
def test_get_name_with_double_slash_comment():
"""
Ensures that get_name correctly extracts a request name
when defined with '//' as a comment.
"""
raw_request = """// @name GetUser
GET /users/{id}"""
expected_output = "GetUser"
assert get_name(raw_request) == expected_output
def test_get_name_no_name():
"""
Ensures that if no '@name' is present, get_name returns None.
"""
raw_request = """GET /users"""
assert get_name(raw_request) is None
def test_get_name_multiple_names():
"""
Ensures that if multiple '@name' occurrences exist,
the function returns None to indicate an error.
"""
raw_request = """# @name FirstName
GET /users
# @name SecondName
POST /users"""
assert get_name(raw_request) is None # Multiple names should result in None
def test_get_name_with_extra_whitespace():
"""
Ensures that extra spaces around @name do not affect the extracted name.
"""
raw_request = """ # @name MyRequest
GET /data"""
expected_output = "MyRequest"
assert get_name(raw_request) == expected_output
def test_get_name_without_request():
"""
Ensures that a request with only an @name definition still correctly extracts the name.
"""
raw_request = """// @name LoneRequest"""
expected_output = "LoneRequest"
assert get_name(raw_request) == expected_output
def test_get_name_inline_invalid():
"""
Ensures that @name only works when it starts a line,
and does not extract names from inline comments.
"""
raw_request = """GET /users # @name InlineName"""
assert get_name(raw_request) is None # Inline @name should not be detected
def test_get_name_mixed_comment_styles():
"""
Ensures that if multiple valid @name comments exist,
the function returns None to indicate an error.
"""
raw_request = """# @name FirstRequest
// @name SecondRequest
GET /items"""
assert get_name(raw_request) is None
# TESTS FOR replace_global --> REQ_005
def test_replace_global_no_definitions():
"""
Ensures that if no global variable definitions are present,
the file contents remain unchanged.
"""
raw_contents = "GET /users/{{id}}"
expected_output = raw_contents # No replacement should occur
assert replace_global(raw_contents) == expected_output
def test_replace_global_single_variable():
"""
Ensures that a single global variable definition is correctly used to replace
all its corresponding placeholders in the file.
"""
raw_contents = """@host=example.com
GET http://{{host}}/users"""
expected_output = """@host=example.com
GET http://example.com/users"""
assert replace_global(raw_contents) == expected_output
def test_replace_global_multiple_variables():
"""
Ensures that multiple global variable definitions are correctly used to replace
their corresponding placeholders in the file.
"""
raw_contents = """@host=example.com
@port=8080
GET http://{{host}}:{{port}}/users"""
expected_output = """@host=example.com
@port=8080
GET http://example.com:8080/users"""
assert replace_global(raw_contents) == expected_output
def test_replace_global_multiple_occurrences():
"""
Ensures that if a variable appears multiple times in the file,
all occurrences are replaced.
"""
raw_contents = """@name=Test
GET /api?param={{name}}&other={{name}}"""
expected_output = """@name=Test
GET /api?param=Test&other=Test"""
assert replace_global(raw_contents) == expected_output
def test_replace_global_value_with_spaces():
"""
Ensures that global variable definitions with spaces in their values are handled correctly.
"""
raw_contents = """@greeting=Hello World
GET /message?text={{greeting}}"""
expected_output = """@greeting=Hello World
GET /message?text=Hello World"""
assert replace_global(raw_contents) == expected_output
def test_replace_global_definition_without_placeholder():
"""
Ensures that if a global variable is defined but its placeholder is not present,
the file remains unchanged.
"""
raw_contents = """@unused=Value
GET /info"""
expected_output = raw_contents # No replacement should occur
assert replace_global(raw_contents) == expected_output
# TESTS FOR extract_headers --> REQ_003
def test_extract_headers_empty():
"""
Test 1: Empty list should return an empty dictionary.
"""
raw_text = []
expected = {}
assert extract_headers(raw_text) == expected
def test_extract_headers_only_empty_lines():
"""
Test 2: Lines that are empty or only whitespace should be ignored.
"""
raw_text = ["", " ", "\t"]
expected = {}
assert extract_headers(raw_text) == expected
def test_extract_headers_single_header():
"""
Test 3: A single valid header line.
"""
raw_text = ["Content-Type: application/json"]
expected = {"Content-Type": "application/json"}
assert extract_headers(raw_text) == expected
def test_extract_headers_multiple_headers():
"""
Test 4: Multiple header lines should be parsed into a dictionary.
"""
raw_text = [
"Content-Type: application/json",
"Authorization: Bearer token123"
]
expected = {
"Content-Type": "application/json",
"Authorization": "Bearer token123"
}
assert extract_headers(raw_text) == expected
def test_extract_headers_line_without_colon():
"""
Test 5: Lines without a colon should be ignored.
"""
raw_text = [
"This is not a header",
"Content-Length: 123"
]
expected = {"Content-Length": "123"}
assert extract_headers(raw_text) == expected
def test_extract_headers_extra_spaces():
"""
Test 6: Extra whitespace around header names and values should be trimmed.
"""
raw_text = [
" Accept : text/html "
]
expected = {"Accept": "text/html"}
assert extract_headers(raw_text) == expected
def test_extract_headers_multiple_colons():
"""
Test 7: Only the first colon should be used to split the header name and value.
"""
raw_text = [
"Custom-Header: value:with:colons"
]
expected = {"Custom-Header": "value:with:colons"}
assert extract_headers(raw_text) == expected
def test_extract_headers_duplicate_headers():
"""
Test 8: If a header appears more than once, the last occurrence should overwrite previous ones.
"""
raw_text = [
"X-Header: one",
"X-Header: two"
]
expected = {"X-Header": "two"}
assert extract_headers(raw_text) == expected
# TESTS FOR parse_body -->> REQ_002
# TODO: create tests after function definition is done
# TESTS FOR parse_single_request -->> REQ_002
def test_parse_single_request_minimal():
"""
A minimal HTTP request that only contains the request line (method and URL).
Expected:
- method and URL are parsed correctly.
- headers is an empty dict.
- body is empty (after processing by parse_body).
- name is None (since no @name comment exists).
"""
raw_text = "GET http://example.com"
result = parse_single_request(raw_text)
assert result.method == "GET"
assert result.url == "http://example.com"
assert result.headers == {}
expected_body = parse_body("")
assert result.body == expected_body
assert result.name is None
def test_parse_single_request_with_headers_and_body():
"""
Tests a request that includes a request line, headers, and a body.
Expected:
- Correctly parsed method and URL.
- Headers are extracted into a dictionary.
- The body is passed through parse_body and matches the expected output.
- No @name is defined, so name is None.
"""
raw_text = """POST http://example.com/api
Content-Type: application/json
Authorization: Bearer token
{
"key": "value"
}"""
result = parse_single_request(raw_text)
assert result.method == "POST"
assert result.url == "http://example.com/api"
assert result.headers == {
"Content-Type": "application/json",
"Authorization": "Bearer token"
}
expected_body = parse_body("{\n \"key\": \"value\"\n}")
assert result.body == expected_body
assert result.name is None
def test_parse_single_request_with_name():
"""
Tests a request that includes a @name comment.
The @name line is removed from the parsed lines (since lines starting with '#' are filtered out)
but get_name is still applied on the original raw text.
Expected:
- name is extracted as defined by get_name.
- Other fields (method, URL, headers, body) are parsed normally.
"""
raw_text = """# @name MyTestRequest
GET http://example.com
Content-Type: text/plain
Hello, world!
"""
result = parse_single_request(raw_text)
assert result.method == "GET"
assert result.url == "http://example.com"
assert result.headers == {"Content-Type": "text/plain"}
expected_body = parse_body("Hello, world!")
assert result.body == expected_body
assert result.name == "MyTestRequest"
def test_parse_single_request_extra_blank_lines():
"""
Tests that multiple blank lines (which trigger the switch from headers to body)
are handled properly.
Expected:
- The request line is parsed.
- Headers are extracted before the first blank line.
- Everything after the blank lines is treated as the body.
"""
raw_text = """PUT http://example.com/update
Accept: application/json
Line one of the body.
Line two of the body.
"""
result = parse_single_request(raw_text)
assert result.method == "PUT"
assert result.url == "http://example.com/update"
assert result.headers == {"Accept": "application/json"}
expected_body = parse_body("Line one of the body.\nLine two of the body.")
assert result.body == expected_body
assert result.name is None
def test_parse_single_request_ignore_comments():
"""
Tests that lines starting with '#' (comments) are removed from the parsed headers.
Note: Even if the @name line is a comment, get_name is called on the original raw text,
so it may still extract a name.
Expected:
- Headers only include valid header lines.
- The @name is still extracted if present in the raw text.
"""
raw_text = """# @name CommentedRequest
GET http://example.com/data
# This comment should be ignored
Content-Length: 123
"""
result = parse_single_request(raw_text)
assert result.method == "GET"
assert result.url == "http://example.com/data"
assert result.headers == {"Content-Length": "123"}
expected_body = parse_body("")
assert result.body == expected_body
assert result.name == "CommentedRequest"
if __name__ == "__main__":
pytest.main()