diff --git a/docs/contributors/fetch.py b/docs/contributors/fetch.py
index ba94c28183..3da30fb59e 100644
--- a/docs/contributors/fetch.py
+++ b/docs/contributors/fetch.py
@@ -128,10 +128,10 @@ def find_reporters(since: str, until: str) -> GitHubLogins:
 
 def merge_all_the_people(release: str, contributors: People, committers: FullNames, reporters: GitHubLogins) -> None:
     """
-    >>> contributors = {'Alice': new_person(github='alice', twitter='alice')}
+    >>> contributors = {'John': new_person(github='john', twitter='john')}
     >>> merge_all_the_people('2.6.0', contributors, {}, {})
     >>> contributors
-    {'Alice': {'committed': [], 'reported': [], 'github': 'alice', 'twitter': 'alice'}}
+    {'John': {'committed': [], 'reported': [], 'github': 'john', 'twitter': 'john'}}
 
     >>> contributors = {'Bob': new_person(github='bob', twitter='bob')}
     >>> merge_all_the_people('2.6.0', contributors, {'Bob'}, {'bob'})
diff --git a/httpie/output/utils.py b/httpie/output/utils.py
index 875e885586..f82eaa2c6b 100644
--- a/httpie/output/utils.py
+++ b/httpie/output/utils.py
@@ -32,5 +32,11 @@ def parse_prefixed_json(data: str) -> Tuple[str, str]:
     """
     matches = re.findall(PREFIX_REGEX, data)
-    data_prefix = matches[0] if matches else ''
-    body = data[len(data_prefix):]
+    if matches:
+        data_prefix = matches[0]
+    elif data.startswith(")]}'"):
+        data_prefix = ")]}'"
+    elif data.startswith('for (;;);'):
+        data_prefix = 'for (;;);'
+    else:
+        data_prefix = ''
+    body = data[len(data_prefix):]
     return data_prefix, body
diff --git a/httpie/status.py b/httpie/status.py
index 2abf291843..95dcdb1732 100644
--- a/httpie/status.py
+++ b/httpie/status.py
@@ -18,8 +18,6 @@ class ExitStatus(IntEnum):
     # 128+2 SIGINT
     #
     ERROR_CTRL_C = 130
-
-
 def http_status_to_exit_status(http_status: int, follow=False) -> ExitStatus:
     """
     Translate HTTP status code to exit status code.
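Note on the parse_prefixed_json() change above: `)]}'` and `for (;;);` are anti-JSON-hijacking prefixes that some servers (Google and Facebook APIs, respectively) prepend to JSON bodies, so the payload only parses once the prefix is stripped. A minimal sketch of the idea; the helper below is hypothetical and independent of the actual PREFIX_REGEX in httpie/output/utils.py:

    import json

    KNOWN_PREFIXES = (")]}'", 'for (;;);')

    def strip_json_prefix(data: str):
        # Return (prefix, remainder); unknown prefixes yield ('', data).
        for prefix in KNOWN_PREFIXES:
            if data.startswith(prefix):
                return prefix, data[len(prefix):]
        return '', data

    prefix, body = strip_json_prefix(")]}'\n{\"ok\": true}")
    assert prefix == ")]}'"
    assert json.loads(body) == {'ok': True}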
diff --git a/httpie/uploads.py b/httpie/uploads.py
index 4a993b3a25..7c052da8cf 100644
--- a/httpie/uploads.py
+++ b/httpie/uploads.py
@@ -199,7 +199,8 @@ def prepare_request_body(
     is_file_like = hasattr(raw_body, 'read')
     if isinstance(raw_body, (bytes, str)):
         body = as_bytes(raw_body)
     elif isinstance(raw_body, RequestDataDict):
-        body = as_bytes(urlencode(raw_body, doseq=True))
+        encoded_body = urlencode(raw_body, doseq=True)
+        body = as_bytes(encoded_body)
     else:
         body = raw_body
diff --git a/httpie/utils.py b/httpie/utils.py
index 4735b2be5d..d521cb14d5 100644
--- a/httpie/utils.py
+++ b/httpie/utils.py
@@ -150,7 +150,11 @@ def split_cookies(cookies):
     """
     if not cookies:
         return []
-    return RE_COOKIE_SPLIT.split(cookies)
+
+    cookie_header = cookies.strip()
+    if ',' in cookie_header:
+        return RE_COOKIE_SPLIT.split(cookie_header)
+    return [cookie_header]
 
 
 def get_expired_cookies(
@@ -175,14 +179,18 @@ def is_expired(expires: Optional[float]) -> bool:
 
     _max_age_to_expires(cookies=cookies, now=now)
 
-    return [
-        {
-            'name': cookie['name'],
-            'path': cookie.get('path', '/')
-        }
-        for cookie in cookies
-        if is_expired(expires=cookie.get('expires'))
-    ]
+    expired_cookies = []
+    for cookie in cookies:
+        expires = cookie.get('expires')
+        # Treat `Max-Age: 0` as an immediate expiry even when no
+        # `expires` timestamp is present.
+        max_age_zero = expires is None and cookie.get('max-age') == '0'
+        if is_expired(expires=expires) or max_age_zero:
+            expired_cookies.append({
+                'name': cookie['name'],
+                'path': cookie.get('path', '/')
+            })
+    return expired_cookies
 
 
 def _max_age_to_expires(cookies, now):
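As a quick illustration of the expiry rule implemented in get_expired_cookies() above, assuming `expires` is a Unix timestamp parsed from a Set-Cookie header (the helper below is a hypothetical sketch, not httpie's internal API):

    import time

    def is_expired(expires, now=None):
        # A cookie is expired once its timestamp is in the past; session
        # cookies (expires is None) are never reported as expired here.
        now = time.time() if now is None else now
        return expires is not None and expires <= now

    assert is_expired(time.time() - 3600)      # expired an hour ago
    assert not is_expired(None)                # session cookie is kept
    assert not is_expired(time.time() + 3600)  # still valid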