from __future__ import division

import json
import mimetypes
import time
from collections import OrderedDict
from http.cookiejar import parse_ns_headers
from pprint import pformat
from typing import List, Optional, Tuple

import requests.auth


def load_json_preserve_order(s):
    """Deserialize JSON text, preserving the order of object keys."""
    return json.loads(s, object_pairs_hook=OrderedDict)
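
# A minimal usage sketch (hypothetical input, doctest-style):
#
#     >>> load_json_preserve_order('{"b": 1, "a": 2}')
#     OrderedDict([('b', 1), ('a', 2)])
#
# On Python 3.7+ plain dicts already preserve insertion order, but the
# ``object_pairs_hook`` makes the ordering guarantee explicit for callers.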


def repr_dict(d: dict) -> str:
    """Return a pretty-printed string representation of ``d``."""
    return pformat(d)
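
# A usage sketch (hypothetical dict; note that ``pprint`` sorts keys):
#
#     >>> repr_dict({'b': 1, 'a': 2})
#     "{'a': 2, 'b': 1}"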


def humanize_bytes(n, precision=2):
    # Author: Doug Latornell
    # Licence: MIT
    # URL: https://code.activestate.com/recipes/577081/
    """Return a humanized string representation of a number of bytes.

    Assumes `from __future__ import division`.

    >>> humanize_bytes(1)
    '1 B'
    >>> humanize_bytes(1024, precision=1)
    '1.0 kB'
    >>> humanize_bytes(1024 * 123, precision=1)
    '123.0 kB'
    >>> humanize_bytes(1024 * 12342, precision=1)
    '12.1 MB'
    >>> humanize_bytes(1024 * 12342, precision=2)
    '12.05 MB'
    >>> humanize_bytes(1024 * 1234, precision=2)
    '1.21 MB'
    >>> humanize_bytes(1024 * 1234 * 1111, precision=2)
    '1.31 GB'
    >>> humanize_bytes(1024 * 1234 * 1111, precision=1)
    '1.3 GB'

    """
    abbrevs = [
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'B')
    ]

    if n == 1:
        return '1 B'

    for factor, suffix in abbrevs:
        if n >= factor:
            break

    # noinspection PyUnboundLocalVariable
    return '%.*f %s' % (precision, n / factor, suffix)
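
# Design note: the factors above are binary multiples (1 << 10 == 1024),
# even though the SI-style suffixes might suggest powers of 1000, e.g.:
#
#     >>> humanize_bytes(1536, precision=1)
#     '1.5 kB'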


class ExplicitNullAuth(requests.auth.AuthBase):
    """Forces requests to ignore the ``.netrc``.

    <https://github.com/psf/requests/issues/2773#issuecomment-174312831>

    """

    def __call__(self, r):
        return r
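
# A usage sketch (hypothetical URL): requests only falls back to ~/.netrc
# credentials when no ``auth`` argument is given, so passing this no-op
# AuthBase instance keeps the request anonymous:
#
#     requests.get('https://example.org', auth=ExplicitNullAuth())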


def get_content_type(filename):
    """
    Return the content type for ``filename`` in format appropriate
    for Content-Type headers, or ``None`` if the file type is unknown
    to ``mimetypes``.

    """
    mime, encoding = mimetypes.guess_type(filename, strict=False)
    if mime:
        content_type = mime
        if encoding:
            content_type = '%s; charset=%s' % (mime, encoding)
        return content_type
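
# Usage sketches (hypothetical filenames, doctest-style):
#
#     >>> get_content_type('photo.jpg')
#     'image/jpeg'
#     >>> get_content_type('archive.tar.gz')
#     'application/x-tar; charset=gzip'
#
# Note that ``mimetypes`` reports a content *encoding* such as ``gzip``,
# which this helper renders with a ``charset=`` label.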


def get_expired_cookies(
    headers: List[Tuple[str, str]],
    curr_timestamp: Optional[float] = None,
) -> List[dict]:
    """Extract from ``Set-Cookie`` headers the cookies that have already expired."""
    expired_cookies = []
    cookie_headers = []
    curr_timestamp = curr_timestamp or time.time()

    for header_name, content in headers:
        if header_name == 'Set-Cookie':
            cookie_headers.append(content)

    extracted_cookies = [
        dict(cookie, name=cookie[0][0])
        for cookie in parse_ns_headers(cookie_headers)
    ]

    for cookie in extracted_cookies:
        # parse_ns_headers() converts `expires` to a Unix timestamp,
        # or to None when the date string cannot be parsed, so guard
        # against None before comparing.
        expires = cookie.get('expires')
        if expires is not None and expires <= curr_timestamp:
            expired_cookies.append({
                'name': cookie['name'],
                'path': cookie.get('path', '/')
            })

    return expired_cookies
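
# A usage sketch with a hypothetical header list:
#
#     >>> get_expired_cookies([(
#     ...     'Set-Cookie',
#     ...     'sessionid=deleted; expires=Thu, 01 Jan 1970 00:00:00 GMT; Path=/',
#     ... )])
#     [{'name': 'sessionid', 'path': '/'}]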