| id (string, lengths 10–18) | content (string, lengths 4–768k) | max_stars_repo_path (string, lengths 115–237) |
|---|---|---|
pandas-bug-104 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-104-fixed/pandas/pandas/core/groupby/groupby.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-104-buggy/pandas/pandas/core/groupby/groupby.py |
pandas-bug-97 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-97-fixed/pandas/pandas/core/indexes/timedeltas.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-97-buggy/pandas/pandas/core/indexes/timedeltas.py |
pandas-bug-76 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-76-fixed/pandas/pandas/io/json/_json.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-76-buggy/pandas/pandas/io/json/_json.py |
pandas-bug-165 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-165-fixed/pandas/pandas/core/arrays/datetimelike.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-165-buggy/pandas/pandas/core/arrays/datetimelike.py |
pandas-bug-115 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-115-fixed/pandas/pandas/core/missing.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-115-buggy/pandas/pandas/core/missing.py |
pandas-bug-148 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-148-fixed/pandas/pandas/core/apply.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-148-buggy/pandas/pandas/core/apply.py |
pandas-bug-99 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-99-fixed/pandas/pandas/core/tools/datetimes.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-99-buggy/pandas/pandas/core/tools/datetimes.py |
pandas-bug-30 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-30-fixed/pandas/pandas/io/json/_json.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-30-buggy/pandas/pandas/io/json/_json.py |
pandas-bug-81 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-81-fixed/pandas/pandas/core/arrays/integer.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-81-buggy/pandas/pandas/core/arrays/integer.py |
pandas-bug-8 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-8-fixed/pandas/pandas/core/internals/blocks.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-8-buggy/pandas/pandas/core/internals/blocks.py |
pandas-bug-105 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-105-fixed/pandas/pandas/core/frame.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-105-buggy/pandas/pandas/core/frame.py |
pandas-bug-27 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-27-fixed/pandas/pandas/core/arrays/datetimes.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-27-buggy/pandas/pandas/core/arrays/datetimes.py |
pandas-bug-70 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-70-fixed/pandas/pandas/core/groupby/groupby.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-70-buggy/pandas/pandas/core/groupby/groupby.py |
pandas-bug-40 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-40-fixed/pandas/pandas/core/reshape/merge.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-40-buggy/pandas/pandas/core/reshape/merge.py |
pandas-bug-160 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-160-fixed/pandas/pandas/core/computation/expressions.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-160-buggy/pandas/pandas/core/computation/expressions.py |
pandas-bug-85 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-85-fixed/pandas/pandas/core/indexes/multi.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-85-buggy/pandas/pandas/core/indexes/multi.py |
pandas-bug-22 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-22-fixed/pandas/pandas/core/window/common.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-22-buggy/pandas/pandas/core/window/common.py |
pandas-bug-84 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-84-fixed/pandas/pandas/core/reshape/reshape.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-84-buggy/pandas/pandas/core/reshape/reshape.py |
pandas-bug-102 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-102-fixed/pandas/pandas/core/internals/construction.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-102-buggy/pandas/pandas/core/internals/construction.py |
pandas-bug-74 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-74-fixed/pandas/pandas/core/indexes/timedeltas.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-74-buggy/pandas/pandas/core/indexes/timedeltas.py |
pandas-bug-34 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-34-fixed/pandas/pandas/core/resample.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-34-buggy/pandas/pandas/core/resample.py |
pandas-bug-162 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-162-fixed/pandas/pandas/core/reshape/pivot.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-162-buggy/pandas/pandas/core/reshape/pivot.py |
pandas-bug-29 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-29-fixed/pandas/pandas/core/arrays/interval.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-29-buggy/pandas/pandas/core/arrays/interval.py |
pandas-bug-149 | | BugsInPy/BugsInPy/temp/projects/pandas/bug-149-fixed/pandas/pandas/io/parquet.py,BugsInPy/BugsInPy/temp/projects/pandas/bug-149-buggy/pandas/pandas/io/parquet.py |
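The pandas rows above leave the content cell empty; each max_stars_repo_path cell is a comma-separated pair of a fixed and a buggy copy of the same file inside a BugsInPy checkout. A minimal sketch of diffing one such pair, assuming the checkout layout shown in the paths (the pandas-bug-104 row is used verbatim):

import difflib
from pathlib import Path

fixed_path, buggy_path = (
    "BugsInPy/BugsInPy/temp/projects/pandas/bug-104-fixed/pandas/pandas/core/groupby/groupby.py",
    "BugsInPy/BugsInPy/temp/projects/pandas/bug-104-buggy/pandas/pandas/core/groupby/groupby.py",
)

# Unified diff from the buggy file to the fixed file.
diff = difflib.unified_diff(
    Path(buggy_path).read_text().splitlines(keepends=True),
    Path(fixed_path).read_text().splitlines(keepends=True),
    fromfile=buggy_path,
    tofile=fixed_path,
)
print("".join(diff))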
fastapi-bug-5 | import re
from dataclasses import is_dataclass
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
try:
from pydantic.fields import FieldInfo, ModelField
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[ModelField] = []
responses_from_routes: List[ModelField] = []
callback_flat_models: Set[Type[BaseModel]] = set()
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, ModelField
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
if route.callbacks:
callback_flat_models |= get_flat_models_from_routes(route.callbacks)
flat_models = callback_flat_models | get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: ModelField) -> ModelField:
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model(
original_type.__name__, __config__=original_type.__config__
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
use_type.__validators__ = original_type.__validators__
if PYDANTIC_1:
new_field = ModelField(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
field_info=FieldInfo(None),
)
else: # pragma: nocover
new_field = ModelField( # type: ignore
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=FieldInfo(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
import re
from dataclasses import is_dataclass
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
try:
from pydantic.fields import FieldInfo, ModelField
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[ModelField] = []
responses_from_routes: List[ModelField] = []
callback_flat_models: Set[Type[BaseModel]] = set()
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, ModelField
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
if route.callbacks:
callback_flat_models |= get_flat_models_from_routes(route.callbacks)
flat_models = callback_flat_models | get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: ModelField) -> ModelField:
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model(
original_type.__name__, __config__=original_type.__config__
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(f)
use_type.__validators__ = original_type.__validators__
if PYDANTIC_1:
new_field = ModelField(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
field_info=FieldInfo(None),
)
else: # pragma: nocover
new_field = ModelField( # type: ignore
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=FieldInfo(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-5-fixed/fastapi/fastapi/utils.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-5-buggy/fastapi/fastapi/utils.py |
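The two copies of fastapi/utils.py in the fastapi-bug-5 row differ by one line in create_cloned_field: one assigns each sub-model field as-is (use_type.__fields__[f.name] = f), the other clones it recursively (create_cloned_field(f)). A hedged sketch of why that matters, with hypothetical models, assuming FastAPI running on Pydantic v1 as in this snapshot: without the recursive clone, a nested value that is a subclass of the declared type can pass validation unchanged and leak extra attributes into the response.

from fastapi import FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel

class Pet(BaseModel):
    name: str

class PetInDB(Pet):
    owner_ssn: str  # must never reach the client

class Person(BaseModel):
    pet: Pet

app = FastAPI()

@app.get("/person", response_model=Person)
def read_person():
    # Returns a subclass instance in the nested field.
    return {"pet": PetInDB(name="Rex", owner_ssn="123-45-6789")}

client = TestClient(app)
# With the recursive clone, the nested value is re-validated against a fresh
# Pet model and owner_ssn is dropped; with the shallow field copy, the
# PetInDB instance passes isinstance validation and can leak through.
print(client.get("/person").json())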
fastapi-bug-12 | import binascii
from base64 import b64decode
from typing import Optional
from fastapi.exceptions import HTTPException
from fastapi.openapi.models import (
HTTPBase as HTTPBaseModel,
HTTPBearer as HTTPBearerModel,
)
from fastapi.security.base import SecurityBase
from fastapi.security.utils import get_authorization_scheme_param
from pydantic import BaseModel
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
class HTTPBasicCredentials(BaseModel):
username: str
password: str
class HTTPAuthorizationCredentials(BaseModel):
scheme: str
credentials: str
class HTTPBase(SecurityBase):
def __init__(
self, *, scheme: str, scheme_name: str = None, auto_error: bool = True
):
self.model = HTTPBaseModel(scheme=scheme)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
class HTTPBasic(HTTPBase):
def __init__(
self, *, scheme_name: str = None, realm: str = None, auto_error: bool = True
):
self.model = HTTPBaseModel(scheme="basic")
self.scheme_name = scheme_name or self.__class__.__name__
self.realm = realm
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[HTTPBasicCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if self.realm:
unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'}
else:
unauthorized_headers = {"WWW-Authenticate": "Basic"}
invalid_user_credentials_exc = HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers=unauthorized_headers,
)
if not authorization or scheme.lower() != "basic":
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers=unauthorized_headers,
)
else:
return None
try:
data = b64decode(param).decode("ascii")
except (ValueError, UnicodeDecodeError, binascii.Error):
raise invalid_user_credentials_exc
username, separator, password = data.partition(":")
if not (separator):
raise invalid_user_credentials_exc
return HTTPBasicCredentials(username=username, password=password)
class HTTPBearer(HTTPBase):
def __init__(
self,
*,
bearerFormat: str = None,
scheme_name: str = None,
auto_error: bool = True,
):
self.model = HTTPBearerModel(bearerFormat=bearerFormat)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
if scheme.lower() != "bearer":
raise HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Invalid authentication credentials",
)
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
class HTTPDigest(HTTPBase):
def __init__(self, *, scheme_name: str = None, auto_error: bool = True):
self.model = HTTPBaseModel(scheme="digest")
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
if scheme.lower() != "digest":
raise HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Invalid authentication credentials",
)
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
import binascii
from base64 import b64decode
from typing import Optional
from fastapi.exceptions import HTTPException
from fastapi.openapi.models import (
HTTPBase as HTTPBaseModel,
HTTPBearer as HTTPBearerModel,
)
from fastapi.security.base import SecurityBase
from fastapi.security.utils import get_authorization_scheme_param
from pydantic import BaseModel
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
class HTTPBasicCredentials(BaseModel):
username: str
password: str
class HTTPAuthorizationCredentials(BaseModel):
scheme: str
credentials: str
class HTTPBase(SecurityBase):
def __init__(
self, *, scheme: str, scheme_name: str = None, auto_error: bool = True
):
self.model = HTTPBaseModel(scheme=scheme)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
class HTTPBasic(HTTPBase):
def __init__(
self, *, scheme_name: str = None, realm: str = None, auto_error: bool = True
):
self.model = HTTPBaseModel(scheme="basic")
self.scheme_name = scheme_name or self.__class__.__name__
self.realm = realm
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[HTTPBasicCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if self.realm:
unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'}
else:
unauthorized_headers = {"WWW-Authenticate": "Basic"}
invalid_user_credentials_exc = HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers=unauthorized_headers,
)
if not authorization or scheme.lower() != "basic":
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers=unauthorized_headers,
)
else:
return None
try:
data = b64decode(param).decode("ascii")
except (ValueError, UnicodeDecodeError, binascii.Error):
raise invalid_user_credentials_exc
username, separator, password = data.partition(":")
if not (separator):
raise invalid_user_credentials_exc
return HTTPBasicCredentials(username=username, password=password)
class HTTPBearer(HTTPBase):
def __init__(
self,
*,
bearerFormat: str = None,
scheme_name: str = None,
auto_error: bool = True,
):
self.model = HTTPBearerModel(bearerFormat=bearerFormat)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
if scheme.lower() != "bearer":
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Invalid authentication credentials",
)
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
class HTTPDigest(HTTPBase):
def __init__(self, *, scheme_name: str = None, auto_error: bool = True):
self.model = HTTPBaseModel(scheme="digest")
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
if scheme.lower() != "digest":
raise HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Invalid authentication credentials",
)
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-12-fixed/fastapi/fastapi/security/http.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-12-buggy/fastapi/fastapi/security/http.py |
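The two copies of fastapi/security/http.py in the fastapi-bug-12 row differ in HTTPBearer.__call__: one raises 403 unconditionally when the scheme is not "bearer", the other first checks self.auto_error and returns None instead. A minimal sketch of the behavioral difference (the endpoint name is hypothetical):

from typing import Optional
from fastapi import FastAPI, Security
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.testclient import TestClient

app = FastAPI()
bearer = HTTPBearer(auto_error=False)

@app.get("/whoami")
def whoami(
    creds: Optional[HTTPAuthorizationCredentials] = Security(bearer),
):
    # With auto_error=False the dependency should yield None for a
    # non-bearer Authorization header rather than raising.
    return {"scheme": creds.scheme if creds else None}

client = TestClient(app)
response = client.get("/whoami", headers={"Authorization": "Basic Zm9v"})
# Copy with the auto_error check: 200 and {"scheme": None}.
# Copy without it: 403 despite auto_error=False.
print(response.status_code, response.json())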
fastapi-bug-3 | import asyncio
import inspect
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.logger import logger
from fastapi.openapi.constants import STATUS_CODES_WITH_NO_BODY
from fastapi.utils import (
PYDANTIC_1,
create_cloned_field,
create_response_field,
generate_operation_id_for_path,
get_field_info,
warning_response_model_skip_defaults_deprecated,
)
from pydantic import BaseModel
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount # noqa
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
try:
from pydantic.fields import FieldInfo, ModelField
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.fields import Field as ModelField # type: ignore
async def serialize_response(
*,
field: ModelField = None,
response_content: Any,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude: Union[SetIntStr, DictIntStrAny] = set(),
by_alias: bool = True,
exclude_unset: bool = False,
is_coroutine: bool = True,
) -> Any:
if field:
errors = []
if exclude_unset and isinstance(response_content, BaseModel):
if PYDANTIC_1:
response_content = response_content.dict(exclude_unset=exclude_unset)
else:
response_content = response_content.dict(
skip_defaults=exclude_unset
) # pragma: nocover
if is_coroutine:
value, errors_ = field.validate(response_content, {}, loc=("response",))
else:
value, errors_ = await run_in_threadpool(
field.validate, response_content, {}, loc=("response",)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
)
else:
return jsonable_encoder(response_content)
async def run_endpoint_function(
*, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool
) -> Any:
# Only called by get_request_handler. Has been split into its own function to
# facilitate profiling endpoints, since inner functions are harder to profile.
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
return await dependant.call(**values)
else:
return await run_in_threadpool(dependant.call, **values)
def get_request_handler(
dependant: Dependant,
body_field: ModelField = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: ModelField = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(get_field_info(body_field), params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logger.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors, body=body)
else:
raw_response = await run_endpoint_function(
dependant=dependant, values=values, is_coroutine=is_coroutine
)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = await serialize_response(
field=response_field,
response_content=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
exclude_unset=response_model_exclude_unset,
is_coroutine=is_coroutine,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Optional[Type[Response]] = None,
dependency_overrides_provider: Any = None,
callbacks: Optional[List["APIRoute"]] = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
self.methods = set([method.upper() for method in methods])
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert (
status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {status_code} must not have a response body"
response_name = "Response_" + self.unique_id
self.response_field = create_response_field(
name=response_name, type_=self.response_model
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
# By being a new field, no inheritance will be passed as is. A new model
# will be always created.
self.secure_cloned_response_field: Optional[
ModelField
] = create_cloned_field(self.response_field)
else:
self.response_field = None # type: ignore
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert (
additional_status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {additional_status_code} must not have a response body"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = create_response_field(name=response_name, type_=model)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], ModelField] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_exclude_unset = response_model_exclude_unset
self.include_in_schema = include_in_schema
self.response_class = response_class
assert callable(endpoint), f"An endpoint must be a callable"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.callbacks = callbacks
self.app = request_response(self.get_route_handler())
def get_route_handler(self) -> Callable:
return get_request_handler(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class or JSONResponse,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_exclude_unset=self.response_model_exclude_unset,
dependency_overrides_provider=self.dependency_overrides_provider,
)
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
route_class: Type[APIRoute] = APIRoute,
default_response_class: Type[Response] = None,
on_startup: Sequence[Callable] = None,
on_shutdown: Sequence[Callable] = None,
) -> None:
super().__init__(
routes=routes,
redirect_slashes=redirect_slashes,
default=default,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
self.default_response_class = default_response_class
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
route_class_override: Optional[Type[APIRoute]] = None,
callbacks: List[APIRoute] = None,
) -> None:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
route_class = route_class_override or self.route_class
route = route_class(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
callbacks=callbacks,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_exclude_unset=route.response_model_exclude_unset,
include_in_schema=route.include_in_schema,
response_class=route.response_class or default_response_class,
name=route.name,
route_class_override=type(route),
callbacks=route.callbacks,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
for handler in router.on_startup:
self.add_event_handler("startup", handler)
for handler in router.on_shutdown:
self.add_event_handler("shutdown", handler)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
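# Hedged note: the copy of routing.py that follows factors the exclude_unset
# handling of serialize_response into _prepare_response_content, which also
# recurses into list and dict responses. A standalone sketch of that recursion
# pattern, with a hypothetical model, kept in comments so the surrounding
# module stays parseable:
#
#     from pydantic import BaseModel
#
#     class Item(BaseModel):
#         name: str
#         price: float = 0.0
#
#     def prepare(res, *, exclude_unset: bool):
#         if isinstance(res, BaseModel):
#             return res.dict(exclude_unset=exclude_unset)
#         if isinstance(res, list):
#             return [prepare(r, exclude_unset=exclude_unset) for r in res]
#         if isinstance(res, dict):
#             return {k: prepare(v, exclude_unset=exclude_unset)
#                     for k, v in res.items()}
#         return res
#
#     # Prints [{'name': 'a'}]: the unset default "price" is dropped even
#     # inside a list, which a top-level-only isinstance check would miss.
#     print(prepare([Item(name="a")], exclude_unset=True))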
import asyncio
import inspect
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.logger import logger
from fastapi.openapi.constants import STATUS_CODES_WITH_NO_BODY
from fastapi.utils import (
PYDANTIC_1,
create_cloned_field,
create_response_field,
generate_operation_id_for_path,
get_field_info,
warning_response_model_skip_defaults_deprecated,
)
from pydantic import BaseModel
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount # noqa
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
try:
from pydantic.fields import FieldInfo, ModelField
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.fields import Field as ModelField # type: ignore
def _prepare_response_content(
res: Any, *, by_alias: bool = True, exclude_unset: bool
) -> Any:
if isinstance(res, BaseModel):
if PYDANTIC_1:
return res.dict(by_alias=by_alias, exclude_unset=exclude_unset)
else:
return res.dict(
by_alias=by_alias, skip_defaults=exclude_unset
) # pragma: nocover
elif isinstance(res, list):
return [
_prepare_response_content(item, exclude_unset=exclude_unset) for item in res
]
elif isinstance(res, dict):
return {
k: _prepare_response_content(v, exclude_unset=exclude_unset)
for k, v in res.items()
}
return res
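# Editorial note (not part of the original source): a minimal illustration with a
# hypothetical model -- given `class Item(BaseModel): name: str = "x"`, a response of
# `[Item()]` becomes `[{"name": "x"}]` here, and `[{}]` when exclude_unset=True (the
# unset default is dropped). Note the recursive list/dict calls forward only
# exclude_unset, not by_alias.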
async def serialize_response(
*,
field: ModelField = None,
response_content: Any,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude: Union[SetIntStr, DictIntStrAny] = set(),
by_alias: bool = True,
exclude_unset: bool = False,
is_coroutine: bool = True,
) -> Any:
if field:
errors = []
response_content = _prepare_response_content(
response_content, by_alias=by_alias, exclude_unset=exclude_unset
)
if is_coroutine:
value, errors_ = field.validate(response_content, {}, loc=("response",))
else:
value, errors_ = await run_in_threadpool(
field.validate, response_content, {}, loc=("response",)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
)
else:
return jsonable_encoder(response_content)
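# Editorial note (not part of the original source): when the endpoint is synchronous
# (is_coroutine is False), the potentially expensive pydantic validation is offloaded
# to a threadpool so it cannot block the event loop; jsonable_encoder then applies
# include/exclude/by_alias/exclude_unset to the validated value.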
async def run_endpoint_function(
*, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool
) -> Any:
# Only called by get_request_handler. Has been split into its own function to
# facilitate profiling endpoints, since inner functions are harder to profile.
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
return await dependant.call(**values)
else:
return await run_in_threadpool(dependant.call, **values)
def get_request_handler(
dependant: Dependant,
body_field: ModelField = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: ModelField = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(get_field_info(body_field), params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logger.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
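        # Editorial note (not part of the original source): form bodies are read via
        # request.form(); otherwise the raw bytes are checked first so an empty body
        # stays None rather than failing JSON parsing, and any parsing failure is
        # surfaced as a plain HTTP 400.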
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors, body=body)
else:
raw_response = await run_endpoint_function(
dependant=dependant, values=values, is_coroutine=is_coroutine
)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = await serialize_response(
field=response_field,
response_content=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
exclude_unset=response_model_exclude_unset,
is_coroutine=is_coroutine,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
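# Editorial sketch (hypothetical names, not part of the original source): the returned
# coroutine is an `async def app(request) -> Response`, which APIRoute below adapts for
# ASGI, roughly:
#   handler = get_request_handler(dependant=some_dependant)  # some_dependant: placeholder
#   asgi_app = request_response(handler)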
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
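# Editorial note (not part of the original source): on validation errors the socket is
# closed with WS_1008_POLICY_VIOLATION *before* raising, so clients observe a policy
# violation close code instead of a dangling connection.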
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Optional[Type[Response]] = None,
dependency_overrides_provider: Any = None,
callbacks: Optional[List["APIRoute"]] = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
        self.methods = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert (
status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {status_code} must not have a response body"
response_name = "Response_" + self.unique_id
self.response_field = create_response_field(
name=response_name, type_=self.response_model
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
            # Because this is a freshly created field, a subclass instance is not
            # passed through as-is: a new, more limited model is always created.
self.secure_cloned_response_field: Optional[
ModelField
] = create_cloned_field(self.response_field)
else:
self.response_field = None # type: ignore
self.secure_cloned_response_field = None
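        # Editorial illustration (hypothetical models, not part of the original source):
        # with `class User(BaseModel): email: str` and
        # `class UserInDB(User): hashed_password: str`, returning a UserInDB from an
        # endpoint whose response_model is User would otherwise pass validation as-is
        # (it *is* a User); validating against the cloned field re-creates the data so
        # only User's declared fields survive.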
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
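        # Editorial example (not part of the original source):
        # "Public summary.\fInternal notes" is truncated to "Public summary."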
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert (
additional_status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {additional_status_code} must not have a response body"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = create_response_field(name=response_name, type_=model)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], ModelField] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_exclude_unset = response_model_exclude_unset
self.include_in_schema = include_in_schema
self.response_class = response_class
        assert callable(endpoint), "An endpoint must be a callable"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.callbacks = callbacks
self.app = request_response(self.get_route_handler())
def get_route_handler(self) -> Callable:
return get_request_handler(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class or JSONResponse,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_exclude_unset=self.response_model_exclude_unset,
dependency_overrides_provider=self.dependency_overrides_provider,
)
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
route_class: Type[APIRoute] = APIRoute,
default_response_class: Type[Response] = None,
on_startup: Sequence[Callable] = None,
on_shutdown: Sequence[Callable] = None,
) -> None:
super().__init__(
routes=routes,
redirect_slashes=redirect_slashes,
default=default,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
self.default_response_class = default_response_class
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
route_class_override: Optional[Type[APIRoute]] = None,
callbacks: List[APIRoute] = None,
) -> None:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
route_class = route_class_override or self.route_class
route = route_class(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
callbacks=callbacks,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
return func
return decorator
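    # Editorial usage sketch (hypothetical endpoint, standard decorator pattern;
    # not part of the original source):
    #   router = APIRouter()
    #   @router.api_route("/ping", methods=["GET"])
    #   async def ping() -> dict:
    #       return {"ok": True}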
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_exclude_unset=route.response_model_exclude_unset,
include_in_schema=route.include_in_schema,
response_class=route.response_class or default_response_class,
name=route.name,
route_class_override=type(route),
callbacks=route.callbacks,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
for handler in router.on_startup:
self.add_event_handler("startup", handler)
for handler in router.on_shutdown:
self.add_event_handler("shutdown", handler)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-3-fixed/fastapi/fastapi/routing.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-3-buggy/fastapi/fastapi/routing.py |
fastapi-bug-15 | import asyncio
import inspect
import logging
from typing import Any, Callable, List, Optional, Type
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_body_field, get_dependant, solve_dependencies
from fastapi.encoders import jsonable_encoder
from fastapi.utils import UnconstrainedConfig
from pydantic import BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import compile_path, get_name, request_response
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
def serialize_response(*, field: Field = None, response: Response) -> Any:
encoded = jsonable_encoder(response)
if field:
errors = []
value, errors_ = field.validate(encoded, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors)
return jsonable_encoder(value)
else:
return encoded
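# Editorial note (not part of the original source): this older variant encodes the raw
# response with jsonable_encoder *before* validating it against the response field,
# then re-encodes the validated value; without a field the encoded payload is returned
# unchanged.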
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
content_type: Type[Response] = JSONResponse,
response_field: Field = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must me a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
raw_body = await request.form()
form_fields = {}
for field, value in raw_body.items():
form_fields[field] = value
if form_fields:
body = form_fields
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
            logging.error("Error getting request body: %s", e)
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
)
values, errors = await solve_dependencies(
request=request, dependant=dependant, body=body
)
if errors:
errors_out = ValidationError(errors)
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail=errors_out.errors()
)
else:
            assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
return raw_response
response_data = serialize_response(
field=response_field, response=raw_response
)
return content_type(content=response_data, status_code=status_code)
return app
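# Editorial note (not part of the original source): get_app builds the per-route ASGI
# request handler -- form fields are copied into a plain dict, dependencies are solved,
# and dependency errors become an HTTP 422 whose detail is the pydantic error list.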
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
name: str = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
) -> None:
assert path.startswith("/"), "Routed paths must always start with '/'"
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.response_model = response_model
if self.response_model:
assert lenient_issubclass(
content_type, JSONResponse
), "To declare a type the response must be a JSON response"
response_name = "Response_" + self.name
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators=[],
default=None,
required=False,
model_config=UnconstrainedConfig,
schema=Schema(None),
)
else:
self.response_field = None
self.status_code = status_code
self.tags = tags or []
self.summary = summary
self.description = description or self.endpoint.__doc__
self.response_description = response_description
self.deprecated = deprecated
if methods is None:
methods = ["GET"]
self.methods = methods
self.operation_id = operation_id
self.include_in_schema = include_in_schema
self.content_type = content_type
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
assert inspect.isfunction(endpoint) or inspect.ismethod(
endpoint
), f"An endpoint must be a function or method"
self.dependant = get_dependant(path=path, call=self.endpoint)
self.body_field = get_body_field(dependant=self.dependant, name=self.name)
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
content_type=self.content_type,
response_field=self.response_field,
)
)
class APIRouter(routing.Router):
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> None:
route = APIRoute(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
return func
return decorator
def include_router(
self, router: "APIRouter", *, prefix: str = "", tags: List[str] = None
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
for route in router.routes:
if isinstance(route, APIRoute):
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
include_in_schema=route.include_in_schema,
content_type=route.content_type,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=route.methods,
include_in_schema=route.include_in_schema,
name=route.name,
)
def get(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
import asyncio
import inspect
import logging
from typing import Any, Callable, List, Optional, Type
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_body_field, get_dependant, solve_dependencies
from fastapi.encoders import jsonable_encoder
from fastapi.utils import UnconstrainedConfig
from pydantic import BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import compile_path, get_name, request_response
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
def serialize_response(*, field: Field = None, response: Response) -> Any:
encoded = jsonable_encoder(response)
if field:
errors = []
value, errors_ = field.validate(encoded, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors)
return jsonable_encoder(value)
else:
return encoded
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
content_type: Type[Response] = JSONResponse,
response_field: Field = None,
) -> Callable:
    assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
raw_body = await request.form()
form_fields = {}
for field, value in raw_body.items():
form_fields[field] = value
if form_fields:
body = form_fields
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
            logging.error("Error getting request body: %s", e)
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
)
values, errors = await solve_dependencies(
request=request, dependant=dependant, body=body
)
if errors:
errors_out = ValidationError(errors)
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail=errors_out.errors()
)
else:
            assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
return raw_response
response_data = serialize_response(
field=response_field, response=raw_response
)
return content_type(content=response_data, status_code=status_code)
return app
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
name: str = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
) -> None:
assert path.startswith("/"), "Routed paths must always start with '/'"
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.response_model = response_model
if self.response_model:
assert lenient_issubclass(
content_type, JSONResponse
), "To declare a type the response must be a JSON response"
response_name = "Response_" + self.name
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators=[],
default=None,
required=False,
model_config=UnconstrainedConfig,
schema=Schema(None),
)
else:
self.response_field = None
self.status_code = status_code
self.tags = tags or []
self.summary = summary
self.description = description or self.endpoint.__doc__
self.response_description = response_description
self.deprecated = deprecated
if methods is None:
methods = ["GET"]
self.methods = methods
self.operation_id = operation_id
self.include_in_schema = include_in_schema
self.content_type = content_type
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
assert inspect.isfunction(endpoint) or inspect.ismethod(
endpoint
), f"An endpoint must be a function or method"
self.dependant = get_dependant(path=path, call=self.endpoint)
self.body_field = get_body_field(dependant=self.dependant, name=self.name)
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
content_type=self.content_type,
response_field=self.response_field,
)
)
class APIRouter(routing.Router):
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> None:
route = APIRoute(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
return func
return decorator
def include_router(
self, router: "APIRouter", *, prefix: str = "", tags: List[str] = None
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
for route in router.routes:
if isinstance(route, APIRoute):
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
include_in_schema=route.include_in_schema,
content_type=route.content_type,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=route.methods,
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
def get(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-15-fixed/fastapi/fastapi/routing.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-15-buggy/fastapi/fastapi/routing.py |
fastapi-bug-11 | import asyncio
import inspect
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import get_path_param_names
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.fields import Field, Required, Shape
from pydantic.schema import get_annotation_from_schema
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
sequence_shapes = {
Shape.LIST,
Shape.SET,
Shape.TUPLE,
Shape.SEQUENCE,
Shape.TUPLE_ELLIPS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
Shape.LIST: list,
Shape.SET: set,
Shape.TUPLE: tuple,
Shape.SEQUENCE: list,
Shape.TUPLE_ELLIPS: list,
}
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
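# Recursively merge every sub-dependency's parameters and security requirements
# into a single flat Dependant.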
def get_flat_dependant(dependant: Dependant) -> Dependant:
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
flat_sub = get_flat_dependant(sub_dependant)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
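# A "scalar" field is a singleton that is neither a Pydantic model, a sequence,
# nor a dict, and that is not declared as a request body.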
def is_scalar_field(field: Field) -> bool:
return (
field.shape == Shape.SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field.schema, params.Body)
)
def is_scalar_sequence_field(field: Field) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
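# Inspect the endpoint signature: register sub-dependencies first, then classify
# each remaining parameter as a path/query/header/cookie param or a body param.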
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = inspect.signature(call)
signature_params = endpoint_signature.parameters
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(param=param, default_schema=params.Query)
if param_name in path_param_names:
assert param.default == param.empty or isinstance(
param.default, params.Path
), "Path params must have no defaults or use Path(...)"
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
param_field = get_param_field(
param=param,
default_schema=params.Path,
force_type=params.ParamTypes.path,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
assert isinstance(
param_field.schema, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
default_schema: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
) -> Field:
default_value = Required
had_schema = False
if not param.default == param.empty:
default_value = param.default
if isinstance(default_value, Schema):
had_schema = True
schema = default_value
default_value = schema.default
if isinstance(schema, params.Param) and getattr(schema, "in_", None) is None:
schema.in_ = default_schema.in_
if force_type:
schema.in_ = force_type # type: ignore
else:
schema = default_schema(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_schema(annotation, schema)
if not schema.alias and getattr(schema, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = schema.alias or param.name
field = Field(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
schema=schema,
)
if not had_schema and not is_scalar_field(field=field):
field.schema = params.Body(schema.default)
return field
def add_param_to_fields(*, field: Field, dependant: Dependant) -> None:
field.schema = cast(params.Param, field.schema)
if field.schema.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field.schema.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field.schema.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field.schema.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isfunction(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
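# Illustrative sketch (not part of the original module) of how the predicate
# above classifies callables; every name defined below is hypothetical.
def _demo_is_coroutine_callable() -> None:
    async def async_fn() -> None: ...
    def sync_fn() -> None: ...
    class AsyncCallable:
        async def __call__(self) -> None: ...
    assert is_coroutine_callable(async_fn)  # coroutine function -> True
    assert not is_coroutine_callable(sync_fn)  # plain function -> False
    assert not is_coroutine_callable(AsyncCallable)  # a class itself -> False
    assert is_coroutine_callable(AsyncCallable())  # instance with async __call__ -> True
# Resolve the whole dependency tree for a request: sub-dependencies first (with
# caching and override support), then path/query/header/cookie and body params.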
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response( # type: ignore
content=None, status_code=None, headers=None, media_type=None, background=None
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
sub_values, sub_errors, background_tasks, sub_response, sub_dependency_cache = (
solved_result
)
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
body_values, body_errors = await request_body_to_args( # type: ignore # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
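# Validate extracted path/query/header/cookie values against their fields,
# returning converted values plus any per-field validation errors.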
def request_params_to_args(
required_params: Sequence[Field],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
schema = field.schema
assert isinstance(schema, params.Param), "Params must be subclasses of Param"
if value is None:
if field.required:
errors.append(
ErrorWrapper(
MissingError(),
loc=(schema.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(value, values, loc=(schema.in_.value, field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
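# Same idea for the request body: a single non-embedded body param is first
# wrapped under its alias, and File params are read (awaited) before validation.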
async def request_body_to_args(
required_params: List[Field],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
embed = getattr(field.schema, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if field.shape in sequence_shapes and isinstance(
received_body, FormData
):
value = received_body.getlist(field.alias)
else:
value = received_body.get(field.alias)
if (
value is None
or (isinstance(field.schema, params.Form) and value == "")
or (
isinstance(field.schema, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
errors.append(
ErrorWrapper(
MissingError(), loc=("body", field.alias), config=BaseConfig
)
)
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def get_schema_compatible_field(*, field: Field) -> Field:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
out_field = Field(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema,
)
return out_field
def get_body_field(*, dependant: Dependant, name: str) -> Optional[Field]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
embed = getattr(first_param.schema, "embed", None)
if len(flat_dependant.body_params) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
required = any(True for f in flat_dependant.body_params if f.required)
if any(isinstance(f.schema, params.File) for f in flat_dependant.body_params):
BodySchema: Type[params.Body] = params.File
elif any(isinstance(f.schema, params.Form) for f in flat_dependant.body_params):
BodySchema = params.Form
else:
BodySchema = params.Body
field = Field(
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
schema=BodySchema(None),
)
return field
import asyncio
import inspect
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import get_path_param_names
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.fields import Field, Required, Shape
from pydantic.schema import get_annotation_from_schema
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
sequence_shapes = {
Shape.LIST,
Shape.SET,
Shape.TUPLE,
Shape.SEQUENCE,
Shape.TUPLE_ELLIPS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
Shape.LIST: list,
Shape.SET: set,
Shape.TUPLE: tuple,
Shape.SEQUENCE: list,
Shape.TUPLE_ELLIPS: list,
}
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
def get_flat_dependant(dependant: Dependant) -> Dependant:
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
flat_sub = get_flat_dependant(sub_dependant)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
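# A scalar field is a non-model, non-sequence singleton not declared as Body
# whose sub-fields (e.g. Union members), if any, are themselves scalar.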
def is_scalar_field(field: Field) -> bool:
if not (
field.shape == Shape.SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field.schema, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: Field) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = inspect.signature(call)
signature_params = endpoint_signature.parameters
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(param=param, default_schema=params.Query)
if param_name in path_param_names:
assert param.default == param.empty or isinstance(
param.default, params.Path
), "Path params must have no defaults or use Path(...)"
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
param_field = get_param_field(
param=param,
default_schema=params.Path,
force_type=params.ParamTypes.path,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
assert isinstance(
param_field.schema, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
default_schema: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
) -> Field:
default_value = Required
had_schema = False
if not param.default == param.empty:
default_value = param.default
if isinstance(default_value, Schema):
had_schema = True
schema = default_value
default_value = schema.default
if isinstance(schema, params.Param) and getattr(schema, "in_", None) is None:
schema.in_ = default_schema.in_
if force_type:
schema.in_ = force_type # type: ignore
else:
schema = default_schema(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_schema(annotation, schema)
if not schema.alias and getattr(schema, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = schema.alias or param.name
field = Field(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
schema=schema,
)
if not had_schema and not is_scalar_field(field=field):
field.schema = params.Body(schema.default)
return field
def add_param_to_fields(*, field: Field, dependant: Dependant) -> None:
field.schema = cast(params.Param, field.schema)
if field.schema.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field.schema.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field.schema.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field.schema.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isfunction(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response( # type: ignore
content=None, status_code=None, headers=None, media_type=None, background=None
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
sub_values, sub_errors, background_tasks, sub_response, sub_dependency_cache = (
solved_result
)
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
body_values, body_errors = await request_body_to_args( # type: ignore # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
def request_params_to_args(
required_params: Sequence[Field],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
schema = field.schema
assert isinstance(schema, params.Param), "Params must be subclasses of Param"
if value is None:
if field.required:
errors.append(
ErrorWrapper(
MissingError(),
loc=(schema.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(value, values, loc=(schema.in_.value, field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
async def request_body_to_args(
required_params: List[Field],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
embed = getattr(field.schema, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if field.shape in sequence_shapes and isinstance(
received_body, FormData
):
value = received_body.getlist(field.alias)
else:
value = received_body.get(field.alias)
if (
value is None
or (isinstance(field.schema, params.Form) and value == "")
or (
isinstance(field.schema, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
errors.append(
ErrorWrapper(
MissingError(), loc=("body", field.alias), config=BaseConfig
)
)
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def get_schema_compatible_field(*, field: Field) -> Field:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
out_field = Field(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema,
)
return out_field
def get_body_field(*, dependant: Dependant, name: str) -> Optional[Field]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
embed = getattr(first_param.schema, "embed", None)
if len(flat_dependant.body_params) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
required = any(True for f in flat_dependant.body_params if f.required)
if any(isinstance(f.schema, params.File) for f in flat_dependant.body_params):
BodySchema: Type[params.Body] = params.File
elif any(isinstance(f.schema, params.Form) for f in flat_dependant.body_params):
BodySchema = params.Form
else:
BodySchema = params.Body
field = Field(
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
schema=BodySchema(None),
)
return field
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-11-fixed/fastapi/fastapi/dependencies/utils.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-11-buggy/fastapi/fastapi/dependencies/utils.py |
fastapi-bug-6 | import asyncio
import inspect
from contextlib import contextmanager
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.concurrency import (
AsyncExitStack,
_fake_asynccontextmanager,
asynccontextmanager,
contextmanager_in_threadpool,
)
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import PYDANTIC_1, get_field_info, get_path_param_names
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
try:
from pydantic.fields import (
SHAPE_LIST,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_SINGLETON,
SHAPE_TUPLE,
SHAPE_TUPLE_ELLIPSIS,
FieldInfo,
ModelField,
Required,
)
from pydantic.schema import get_annotation_from_field_info
from pydantic.typing import ForwardRef, evaluate_forwardref
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic.fields import Required, Shape # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.schema import get_annotation_from_schema # type: ignore
from pydantic.utils import ForwardRef, evaluate_forwardref # type: ignore
SHAPE_LIST = Shape.LIST
SHAPE_SEQUENCE = Shape.SEQUENCE
SHAPE_SET = Shape.SET
SHAPE_SINGLETON = Shape.SINGLETON
SHAPE_TUPLE = Shape.TUPLE
SHAPE_TUPLE_ELLIPSIS = Shape.TUPLE_ELLIPS
def get_annotation_from_field_info(
annotation: Any, field_info: FieldInfo, field_name: str
) -> Type[Any]:
return get_annotation_from_schema(annotation, field_info)
sequence_shapes = {
SHAPE_LIST,
SHAPE_SET,
SHAPE_TUPLE,
SHAPE_SEQUENCE,
SHAPE_TUPLE_ELLIPSIS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
SHAPE_LIST: list,
SHAPE_SET: set,
SHAPE_TUPLE: tuple,
SHAPE_SEQUENCE: list,
SHAPE_TUPLE_ELLIPSIS: list,
}
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
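# Cache key used to de-duplicate resolved dependencies: the callable paired with
# a tuple of its security scopes.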
CacheKey = Tuple[Optional[Callable], Tuple[str, ...]]
def get_flat_dependant(
dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None
) -> Dependant:
if visited is None:
visited = []
visited.append(dependant.cache_key)
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
if skip_repeats and sub_dependant.cache_key in visited:
continue
flat_sub = get_flat_dependant(
sub_dependant, skip_repeats=skip_repeats, visited=visited
)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
def is_scalar_field(field: ModelField) -> bool:
field_info = get_field_info(field)
if not (
field.shape == SHAPE_SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field_info, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: ModelField) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
def get_typed_signature(call: Callable) -> inspect.Signature:
signature = inspect.signature(call)
globalns = getattr(call, "__globals__", {})
typed_params = [
inspect.Parameter(
name=param.name,
kind=param.kind,
default=param.default,
annotation=get_typed_annotation(param, globalns),
)
for param in signature.parameters.values()
]
typed_signature = inspect.Signature(typed_params)
return typed_signature
def get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any:
annotation = param.annotation
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
annotation = evaluate_forwardref(annotation, globalns, globalns)
return annotation
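# Illustrative sketch (not part of the original module): string annotations on a
# callable are resolved against its own globals; "endpoint" here is hypothetical.
def _demo_get_typed_signature() -> None:
    def endpoint(item_id: "int") -> None: ...
    sig = get_typed_signature(endpoint)
    assert sig.parameters["item_id"].annotation is int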
async_contextmanager_dependencies_error = """
FastAPI dependencies with yield require Python 3.7 or above,
or the backports for Python 3.6, installed with:
pip install async-exit-stack async-generator
"""
def check_dependency_contextmanagers() -> None:
if AsyncExitStack is None or asynccontextmanager == _fake_asynccontextmanager:
raise RuntimeError(async_contextmanager_dependencies_error) # pragma: no cover
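# Build the endpoint's Dependant as above, but resolve string annotations first
# and verify contextmanager support for generator ("yield") dependencies.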
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = get_typed_signature(call)
signature_params = endpoint_signature.parameters
if inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):
check_dependency_contextmanagers()
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(
param=param, default_field_info=params.Query, param_name=param_name
)
if param_name in path_param_names:
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
if isinstance(param.default, params.Path):
ignore_default = False
else:
ignore_default = True
param_field = get_param_field(
param=param,
param_name=param_name,
default_field_info=params.Path,
force_type=params.ParamTypes.path,
ignore_default=ignore_default,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
field_info = get_field_info(param_field)
assert isinstance(
field_info, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
param_name: str,
default_field_info: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
ignore_default: bool = False,
) -> ModelField:
default_value = Required
had_schema = False
if not param.default == param.empty and ignore_default is False:
default_value = param.default
if isinstance(default_value, FieldInfo):
had_schema = True
field_info = default_value
default_value = field_info.default
if (
isinstance(field_info, params.Param)
and getattr(field_info, "in_", None) is None
):
field_info.in_ = default_field_info.in_
if force_type:
field_info.in_ = force_type # type: ignore
else:
field_info = default_field_info(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_field_info(annotation, field_info, param_name)
if not field_info.alias and getattr(field_info, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = field_info.alias or param.name
if PYDANTIC_1:
field = ModelField(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
field_info=field_info,
)
# TODO: remove when removing support for Pydantic < 1.2.0
field.required = required
else: # pragma: nocover
field = ModelField( # type: ignore
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
schema=field_info,
)
field.required = required
if not had_schema and not is_scalar_field(field=field):
if PYDANTIC_1:
field.field_info = params.Body(field_info.default)
else:
field.schema = params.Body(field_info.default) # type: ignore # pragma: nocover
return field
def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None:
field_info = cast(params.Param, get_field_info(field))
if field_info.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field_info.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field_info.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field_info.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isroutine(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
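# Run a generator-based dependency as a context manager on the request's
# AsyncExitStack; the value it yields becomes the resolved dependency.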
async def solve_generator(
*, call: Callable, stack: AsyncExitStack, sub_values: Dict[str, Any]
) -> Any:
if inspect.isgeneratorfunction(call):
cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))
elif inspect.isasyncgenfunction(call):
cm = asynccontextmanager(call)(**sub_values)
return await stack.enter_async_context(cm)
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response(
content=None,
status_code=None, # type: ignore
headers=None,
media_type=None,
background=None,
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
(
sub_values,
sub_errors,
background_tasks,
sub_response,
sub_dependency_cache,
) = solved_result
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):
stack = request.scope.get("fastapi_astack")
if stack is None:
raise RuntimeError(
async_contextmanager_dependencies_error
) # pragma: no cover
solved = await solve_generator(
call=call, stack=stack, sub_values=sub_values
)
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
(
body_values,
body_errors,
) = await request_body_to_args( # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
def request_params_to_args(
required_params: Sequence[ModelField],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
field_info = get_field_info(field)
assert isinstance(
field_info, params.Param
), "Params must be subclasses of Param"
if value is None:
if field.required:
if PYDANTIC_1:
errors.append(
ErrorWrapper(
MissingError(), loc=(field_info.in_.value, field.alias)
)
)
else: # pragma: nocover
errors.append(
ErrorWrapper( # type: ignore
MissingError(),
loc=(field_info.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(
value, values, loc=(field_info.in_.value, field.alias)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
async def request_body_to_args(
required_params: List[ModelField],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
field_info = get_field_info(field)
embed = getattr(field_info, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if field.shape in sequence_shapes and isinstance(
received_body, FormData
):
value = received_body.getlist(field.alias)
else:
value = received_body.get(field.alias)
if (
value is None
or (isinstance(field_info, params.Form) and value == "")
or (
isinstance(field_info, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
if PYDANTIC_1:
errors.append(
ErrorWrapper(MissingError(), loc=("body", field.alias))
)
else: # pragma: nocover
errors.append(
ErrorWrapper( # type: ignore
MissingError(),
loc=("body", field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
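# For schema generation, swap UploadFile-typed fields for bytes (or List[bytes]
# for sequence shapes) while keeping the rest of the field's configuration.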
def get_schema_compatible_field(*, field: ModelField) -> ModelField:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
if PYDANTIC_1:
out_field = ModelField(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
field_info=field.field_info,
)
else: # pragma: nocover
out_field = ModelField( # type: ignore
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema, # type: ignore
)
return out_field
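# Combine all flattened body params into one synthetic "body" model field;
# File takes precedence over Form, Form over plain Body, and a media_type shared
# by all body params is propagated to the synthetic field.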
def get_body_field(*, dependant: Dependant, name: str) -> Optional[ModelField]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
field_info = get_field_info(first_param)
embed = getattr(field_info, "embed", None)
if len(flat_dependant.body_params) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
required = any(True for f in flat_dependant.body_params if f.required)
BodyFieldInfo_kwargs: Dict[str, Any] = dict(default=None)
if any(
isinstance(get_field_info(f), params.File) for f in flat_dependant.body_params
):
BodyFieldInfo: Type[params.Body] = params.File
elif any(
isinstance(get_field_info(f), params.Form) for f in flat_dependant.body_params
):
BodyFieldInfo = params.Form
else:
BodyFieldInfo = params.Body
body_param_media_types = [
getattr(get_field_info(f), "media_type")
for f in flat_dependant.body_params
if isinstance(get_field_info(f), params.Body)
]
if len(set(body_param_media_types)) == 1:
BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
if PYDANTIC_1:
field = ModelField(
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
else: # pragma: nocover
field = ModelField( # type: ignore
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
schema=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
return field
import asyncio
import inspect
from contextlib import contextmanager
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.concurrency import (
AsyncExitStack,
_fake_asynccontextmanager,
asynccontextmanager,
contextmanager_in_threadpool,
)
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import PYDANTIC_1, get_field_info, get_path_param_names
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
try:
from pydantic.fields import (
SHAPE_LIST,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_SINGLETON,
SHAPE_TUPLE,
SHAPE_TUPLE_ELLIPSIS,
FieldInfo,
ModelField,
Required,
)
from pydantic.schema import get_annotation_from_field_info
from pydantic.typing import ForwardRef, evaluate_forwardref
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic.fields import Required, Shape # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.schema import get_annotation_from_schema # type: ignore
from pydantic.utils import ForwardRef, evaluate_forwardref # type: ignore
SHAPE_LIST = Shape.LIST
SHAPE_SEQUENCE = Shape.SEQUENCE
SHAPE_SET = Shape.SET
SHAPE_SINGLETON = Shape.SINGLETON
SHAPE_TUPLE = Shape.TUPLE
SHAPE_TUPLE_ELLIPSIS = Shape.TUPLE_ELLIPS
def get_annotation_from_field_info(
annotation: Any, field_info: FieldInfo, field_name: str
) -> Type[Any]:
return get_annotation_from_schema(annotation, field_info)
sequence_shapes = {
SHAPE_LIST,
SHAPE_SET,
SHAPE_TUPLE,
SHAPE_SEQUENCE,
SHAPE_TUPLE_ELLIPSIS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
SHAPE_LIST: list,
SHAPE_SET: set,
SHAPE_TUPLE: tuple,
SHAPE_SEQUENCE: list,
SHAPE_TUPLE_ELLIPSIS: list,
}
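# Illustrative note (not part of the original module): request_body_to_args()
# further below uses this table to rebuild the declared container type after
# reading uploaded files, e.g. a parameter annotated Tuple[bytes, ...]
# (SHAPE_TUPLE_ELLIPSIS) collapses to a plain list of the gathered contents.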
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
CacheKey = Tuple[Optional[Callable], Tuple[str, ...]]
def get_flat_dependant(
dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None
) -> Dependant:
if visited is None:
visited = []
visited.append(dependant.cache_key)
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
if skip_repeats and sub_dependant.cache_key in visited:
continue
flat_sub = get_flat_dependant(
sub_dependant, skip_repeats=skip_repeats, visited=visited
)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
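# Illustrative sketch (not part of the original module; hypothetical names):
# fields declared on a sub-dependency surface on the flattened Dependant.
# The `common_parameters` dependency below is invented for illustration.
def _sketch_get_flat_dependant() -> None:
    def common_parameters(q: str = None, limit: int = 100) -> dict:
        return {"q": q, "limit": limit}
    async def read_items(commons: dict = params.Depends(common_parameters)) -> dict:
        return commons
    dependant = get_dependant(path="/items/", call=read_items)
    flat = get_flat_dependant(dependant)
    # The endpoint itself declares no query params; both come from the
    # sub-dependency and are hoisted by the loop above.
    assert {f.name for f in flat.query_params} == {"q", "limit"}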
def is_scalar_field(field: ModelField) -> bool:
field_info = get_field_info(field)
if not (
field.shape == SHAPE_SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field_info, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: ModelField) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
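# Illustrative note (not part of the original module): under the two
# predicates above, `q: List[int]` is a scalar sequence and may be filled
# from repeated query or header values, whereas `items: List[SomeModel]`
# (a BaseModel element type) fails both checks and can only be populated
# from the request body.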
def get_typed_signature(call: Callable) -> inspect.Signature:
signature = inspect.signature(call)
globalns = getattr(call, "__globals__", {})
typed_params = [
inspect.Parameter(
name=param.name,
kind=param.kind,
default=param.default,
annotation=get_typed_annotation(param, globalns),
)
for param in signature.parameters.values()
]
typed_signature = inspect.Signature(typed_params)
return typed_signature
def get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any:
annotation = param.annotation
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
annotation = evaluate_forwardref(annotation, globalns, globalns)
return annotation
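# Illustrative note (not part of the original module): a string annotation
# such as `def read(item: "Item")` becomes a ForwardRef evaluated against
# the callable's own __globals__, so the referenced name must resolve in
# the module that defines the endpoint.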
async_contextmanager_dependencies_error = """
FastAPI dependencies with yield require Python 3.7 or above,
or the backports for Python 3.6, installed with:
pip install async-exit-stack async-generator
"""
def check_dependency_contextmanagers() -> None:
if AsyncExitStack is None or asynccontextmanager == _fake_asynccontextmanager:
raise RuntimeError(async_contextmanager_dependencies_error) # pragma: no cover
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = get_typed_signature(call)
signature_params = endpoint_signature.parameters
if inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):
check_dependency_contextmanagers()
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(
param=param, default_field_info=params.Query, param_name=param_name
)
if param_name in path_param_names:
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
if isinstance(param.default, params.Path):
ignore_default = False
else:
ignore_default = True
param_field = get_param_field(
param=param,
param_name=param_name,
default_field_info=params.Path,
force_type=params.ParamTypes.path,
ignore_default=ignore_default,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
field_info = get_field_info(param_field)
assert isinstance(
field_info, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
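# Illustrative sketch (not part of the original module; hypothetical names):
# how get_dependant() classifies an endpoint's parameters. `read_item` and
# its parameters are invented for illustration.
def _sketch_get_dependant() -> None:
    async def read_item(item_id: int, q: str = None) -> dict:
        return {"item_id": item_id, "q": q}
    dependant = get_dependant(path="/items/{item_id}", call=read_item)
    # `item_id` appears in the path template, so it is forced to a path
    # param; `q` is a scalar with a default, so it becomes a query param.
    assert [f.name for f in dependant.path_params] == ["item_id"]
    assert [f.name for f in dependant.query_params] == ["q"]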
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
param_name: str,
default_field_info: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
ignore_default: bool = False,
) -> ModelField:
default_value = Required
had_schema = False
if not param.default == param.empty and ignore_default is False:
default_value = param.default
if isinstance(default_value, FieldInfo):
had_schema = True
field_info = default_value
default_value = field_info.default
if (
isinstance(field_info, params.Param)
and getattr(field_info, "in_", None) is None
):
field_info.in_ = default_field_info.in_
if force_type:
field_info.in_ = force_type # type: ignore
else:
field_info = default_field_info(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_field_info(annotation, field_info, param_name)
if not field_info.alias and getattr(field_info, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = field_info.alias or param.name
if PYDANTIC_1:
field = ModelField(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
field_info=field_info,
)
# TODO: remove when removing support for Pydantic < 1.2.0
field.required = required
else: # pragma: nocover
field = ModelField( # type: ignore
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
schema=field_info,
)
field.required = required
if not had_schema and not is_scalar_field(field=field):
if PYDANTIC_1:
field.field_info = params.Body(field_info.default)
else:
field.schema = params.Body(field_info.default) # type: ignore # pragma: nocover
return field
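# Illustrative note (not part of the original module): because Header sets
# convert_underscores, a parameter declared as `user_agent: str = Header(None)`
# is matched against the incoming "user-agent" header by the alias logic
# above, unless an explicit alias overrides it.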
def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None:
field_info = cast(params.Param, get_field_info(field))
if field_info.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field_info.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field_info.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field_info.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isroutine(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
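# Illustrative sketch (not part of the original module; hypothetical names):
# the predicate above also detects callable instances whose __call__ is a
# coroutine function.
def _sketch_is_coroutine_callable() -> None:
    class AsyncDep:
        async def __call__(self) -> int:
            return 1
    assert is_coroutine_callable(AsyncDep()) is True
    assert is_coroutine_callable(AsyncDep) is False  # classes themselves are excluded
    assert is_coroutine_callable(lambda: 1) is False  # sync callables go to the threadpool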
async def solve_generator(
*, call: Callable, stack: AsyncExitStack, sub_values: Dict[str, Any]
) -> Any:
if inspect.isgeneratorfunction(call):
cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))
elif inspect.isasyncgenfunction(call):
cm = asynccontextmanager(call)(**sub_values)
return await stack.enter_async_context(cm)
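# Illustrative note (not part of the original module): a sync generator
# dependency such as
#     def get_db():
#         db = Session()  # `Session` is a hypothetical name
#         try:
#             yield db
#         finally:
#             db.close()
# is wrapped with contextlib.contextmanager and entered via a threadpool,
# while an async generator is entered directly; in both cases the code
# after `yield` runs when the request's AsyncExitStack closes.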
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response(
content=None,
status_code=None, # type: ignore
headers=None,
media_type=None,
background=None,
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
(
sub_values,
sub_errors,
background_tasks,
sub_response,
sub_dependency_cache,
) = solved_result
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):
stack = request.scope.get("fastapi_astack")
if stack is None:
raise RuntimeError(
async_contextmanager_dependencies_error
) # pragma: no cover
solved = await solve_generator(
call=call, stack=stack, sub_values=sub_values
)
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
(
body_values,
body_errors,
) = await request_body_to_args( # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
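# Illustrative note (not part of the original module): solved sub-dependency
# values are memoized in dependency_cache under Dependant.cache_key, so a
# dependency declared with use_cache=True executes at most once per request
# even when several parameters depend on it; headers and status codes set on
# sub-responses are merged back into the shared Response in the loop above.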
def request_params_to_args(
required_params: Sequence[ModelField],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
field_info = get_field_info(field)
assert isinstance(
field_info, params.Param
), "Params must be subclasses of Param"
if value is None:
if field.required:
if PYDANTIC_1:
errors.append(
ErrorWrapper(
MissingError(), loc=(field_info.in_.value, field.alias)
)
)
else: # pragma: nocover
errors.append(
ErrorWrapper( # type: ignore
MissingError(),
loc=(field_info.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(
value, values, loc=(field_info.in_.value, field.alias)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
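# Illustrative note (not part of the original module): for a scalar-sequence
# field, repeated values such as ?tags=a&tags=b arrive via
# QueryParams.getlist() as ["a", "b"] before validation; a missing optional
# param falls back to a deepcopy of its default so mutable defaults are
# never shared between requests.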
async def request_body_to_args(
required_params: List[ModelField],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
field_info = get_field_info(field)
embed = getattr(field_info, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if (
field.shape in sequence_shapes or field.type_ in sequence_types
) and isinstance(received_body, FormData):
value = received_body.getlist(field.alias)
else:
value = received_body.get(field.alias)
if (
value is None
or (isinstance(field_info, params.Form) and value == "")
or (
isinstance(field_info, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
if PYDANTIC_1:
errors.append(
ErrorWrapper(MissingError(), loc=("body", field.alias))
)
else: # pragma: nocover
errors.append(
ErrorWrapper( # type: ignore
MissingError(),
loc=("body", field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
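# Illustrative note (not part of the original module): with a single
# non-embedded body parameter, the raw body was re-wrapped above as
# {field.alias: received_body}, so an endpoint taking `item: Item` validates
# the whole JSON document against Item rather than expecting {"item": {...}}.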
def get_schema_compatible_field(*, field: ModelField) -> ModelField:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
if PYDANTIC_1:
out_field = ModelField(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
field_info=field.field_info,
)
else: # pragma: nocover
out_field = ModelField( # type: ignore
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema, # type: ignore
)
return out_field
def get_body_field(*, dependant: Dependant, name: str) -> Optional[ModelField]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
field_info = get_field_info(first_param)
embed = getattr(field_info, "embed", None)
if len(flat_dependant.body_params) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
required = any(True for f in flat_dependant.body_params if f.required)
BodyFieldInfo_kwargs: Dict[str, Any] = dict(default=None)
if any(
isinstance(get_field_info(f), params.File) for f in flat_dependant.body_params
):
BodyFieldInfo: Type[params.Body] = params.File
elif any(
isinstance(get_field_info(f), params.Form) for f in flat_dependant.body_params
):
BodyFieldInfo = params.Form
else:
BodyFieldInfo = params.Body
body_param_media_types = [
getattr(get_field_info(f), "media_type")
for f in flat_dependant.body_params
if isinstance(get_field_info(f), params.Body)
]
if len(set(body_param_media_types)) == 1:
BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
if PYDANTIC_1:
field = ModelField(
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
else: # pragma: nocover
field = ModelField( # type: ignore
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
schema=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
return field
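# Illustrative sketch (not part of the original module; hypothetical names):
# with two Pydantic models as body parameters, get_body_field() synthesizes
# a combined "Body_<name>" model so both are embedded in a single request
# body schema. `Item` and `User` are invented examples.
def _sketch_get_body_field() -> None:
    class Item(BaseModel):
        name: str
    class User(BaseModel):
        username: str
    def create(item: Item, user: User) -> None:
        ...
    dependant = get_dependant(path="/items/", call=create)
    body_field = get_body_field(dependant=dependant, name="create_items_post")
    assert body_field is not None and body_field.name == "body"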
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-6-fixed/fastapi/fastapi/dependencies/utils.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-6-buggy/fastapi/fastapi/dependencies/utils.py |
fastapi-bug-16 | from enum import Enum
from types import GeneratorType
from typing import Any, Set
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
def jsonable_encoder(
obj: Any,
include: Set[str] = None,
exclude: Set[str] = set(),
by_alias: bool = False,
include_none: bool = True,
custom_encoder: dict = {},
) -> Any:
if isinstance(obj, BaseModel):
if not obj.Config.json_encoders:
return jsonable_encoder(
obj.dict(include=include, exclude=exclude, by_alias=by_alias),
include_none=include_none,
)
else:
return jsonable_encoder(
obj.dict(include=include, exclude=exclude, by_alias=by_alias),
include_none=include_none,
custom_encoder=obj.Config.json_encoders,
)
if isinstance(obj, Enum):
return obj.value
if isinstance(obj, (str, int, float, type(None))):
return obj
if isinstance(obj, dict):
return {
jsonable_encoder(
key,
by_alias=by_alias,
include_none=include_none,
custom_encoder=custom_encoder,
): jsonable_encoder(
value,
by_alias=by_alias,
include_none=include_none,
custom_encoder=custom_encoder,
)
for key, value in obj.items()
if value is not None or include_none
}
if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
return [
jsonable_encoder(
item,
include=include,
exclude=exclude,
by_alias=by_alias,
include_none=include_none,
custom_encoder=custom_encoder,
)
for item in obj
]
errors = []
try:
if custom_encoder and type(obj) in custom_encoder:
encoder = custom_encoder[type(obj)]
else:
encoder = ENCODERS_BY_TYPE[type(obj)]
return encoder(obj)
except KeyError as e:
errors.append(e)
    try:
        data = dict(obj)
    except Exception as e:
        errors.append(e)
        try:
            data = vars(obj)
        except Exception as e:
            errors.append(e)
            raise ValueError(errors)
return jsonable_encoder(data, by_alias=by_alias, include_none=include_none)
from enum import Enum
from types import GeneratorType
from typing import Any, Set
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
def jsonable_encoder(
obj: Any,
include: Set[str] = None,
exclude: Set[str] = set(),
by_alias: bool = False,
include_none: bool = True,
custom_encoder: dict = {},
) -> Any:
if isinstance(obj, BaseModel):
encoder = getattr(obj.Config, "json_encoders", custom_encoder)
return jsonable_encoder(
obj.dict(include=include, exclude=exclude, by_alias=by_alias),
include_none=include_none,
custom_encoder=encoder,
)
if isinstance(obj, Enum):
return obj.value
if isinstance(obj, (str, int, float, type(None))):
return obj
if isinstance(obj, dict):
return {
jsonable_encoder(
key,
by_alias=by_alias,
include_none=include_none,
custom_encoder=custom_encoder,
): jsonable_encoder(
value,
by_alias=by_alias,
include_none=include_none,
custom_encoder=custom_encoder,
)
for key, value in obj.items()
if value is not None or include_none
}
if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
return [
jsonable_encoder(
item,
include=include,
exclude=exclude,
by_alias=by_alias,
include_none=include_none,
custom_encoder=custom_encoder,
)
for item in obj
]
errors = []
try:
if custom_encoder and type(obj) in custom_encoder:
encoder = custom_encoder[type(obj)]
else:
encoder = ENCODERS_BY_TYPE[type(obj)]
return encoder(obj)
except KeyError as e:
errors.append(e)
    try:
        data = dict(obj)
    except Exception as e:
        errors.append(e)
        try:
            data = vars(obj)
        except Exception as e:
            errors.append(e)
            raise ValueError(errors)
return jsonable_encoder(data, by_alias=by_alias, include_none=include_none)
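# Illustrative sketch (not part of the original module; hypothetical names):
# Config.json_encoders lets a model supply its own serializer, which
# jsonable_encoder() forwards into the nested calls above.
def _sketch_jsonable_encoder() -> None:
    from datetime import datetime
    class Log(BaseModel):
        ts: datetime
        class Config:
            json_encoders = {datetime: lambda dt: dt.timestamp()}
    encoded = jsonable_encoder(Log(ts=datetime(2019, 1, 1)))
    assert encoded == {"ts": datetime(2019, 1, 1).timestamp()}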
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-16-fixed/fastapi/fastapi/encoders.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-16-buggy/fastapi/fastapi/encoders.py |
fastapi-bug-14 | import logging
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Schema as PSchema
from pydantic.types import UrlStr
try:
import email_validator
assert email_validator # make autoflake ignore the unused import
from pydantic.types import EmailStr # type: ignore
except ImportError: # pragma: no cover
logging.warning(
"email-validator not installed, email fields will be treated as str.\n"
+ "To install, run: pip install email-validator"
)
class EmailStr(str): # type: ignore
pass
class Contact(BaseModel):
name: Optional[str] = None
url: Optional[UrlStr] = None
email: Optional[EmailStr] = None
class License(BaseModel):
name: str
url: Optional[UrlStr] = None
class Info(BaseModel):
title: str
description: Optional[str] = None
termsOfService: Optional[str] = None
contact: Optional[Contact] = None
license: Optional[License] = None
version: str
class ServerVariable(BaseModel):
enum: Optional[List[str]] = None
default: str
description: Optional[str] = None
class Server(BaseModel):
url: UrlStr
description: Optional[str] = None
variables: Optional[Dict[str, ServerVariable]] = None
class Reference(BaseModel):
ref: str = PSchema(..., alias="$ref") # type: ignore
class Discriminator(BaseModel):
propertyName: str
mapping: Optional[Dict[str, str]] = None
class XML(BaseModel):
name: Optional[str] = None
namespace: Optional[str] = None
prefix: Optional[str] = None
attribute: Optional[bool] = None
wrapped: Optional[bool] = None
class ExternalDocumentation(BaseModel):
description: Optional[str] = None
url: UrlStr
class SchemaBase(BaseModel):
ref: Optional[str] = PSchema(None, alias="$ref") # type: ignore
title: Optional[str] = None
multipleOf: Optional[float] = None
maximum: Optional[float] = None
exclusiveMaximum: Optional[float] = None
minimum: Optional[float] = None
exclusiveMinimum: Optional[float] = None
maxLength: Optional[int] = PSchema(None, gte=0) # type: ignore
minLength: Optional[int] = PSchema(None, gte=0) # type: ignore
pattern: Optional[str] = None
maxItems: Optional[int] = PSchema(None, gte=0) # type: ignore
minItems: Optional[int] = PSchema(None, gte=0) # type: ignore
uniqueItems: Optional[bool] = None
maxProperties: Optional[int] = PSchema(None, gte=0) # type: ignore
minProperties: Optional[int] = PSchema(None, gte=0) # type: ignore
required: Optional[List[str]] = None
enum: Optional[List[str]] = None
type: Optional[str] = None
allOf: Optional[List[Any]] = None
oneOf: Optional[List[Any]] = None
anyOf: Optional[List[Any]] = None
not_: Optional[List[Any]] = PSchema(None, alias="not") # type: ignore
items: Optional[Any] = None
properties: Optional[Dict[str, Any]] = None
additionalProperties: Optional[Union[bool, Any]] = None
description: Optional[str] = None
format: Optional[str] = None
default: Optional[Any] = None
nullable: Optional[bool] = None
discriminator: Optional[Discriminator] = None
readOnly: Optional[bool] = None
writeOnly: Optional[bool] = None
xml: Optional[XML] = None
externalDocs: Optional[ExternalDocumentation] = None
example: Optional[Any] = None
deprecated: Optional[bool] = None
class Schema(SchemaBase):
allOf: Optional[List[SchemaBase]] = None
oneOf: Optional[List[SchemaBase]] = None
anyOf: Optional[List[SchemaBase]] = None
not_: Optional[List[SchemaBase]] = PSchema(None, alias="not") # type: ignore
items: Optional[SchemaBase] = None
properties: Optional[Dict[str, SchemaBase]] = None
additionalProperties: Optional[Union[bool, SchemaBase]] = None
class Example(BaseModel):
summary: Optional[str] = None
description: Optional[str] = None
value: Optional[Any] = None
externalValue: Optional[UrlStr] = None
class ParameterInType(Enum):
query = "query"
header = "header"
path = "path"
cookie = "cookie"
class Encoding(BaseModel):
contentType: Optional[str] = None
# Workaround OpenAPI recursive reference, using Any
headers: Optional[Dict[str, Union[Any, Reference]]] = None
style: Optional[str] = None
explode: Optional[bool] = None
allowReserved: Optional[bool] = None
class MediaType(BaseModel):
schema_: Optional[Union[Schema, Reference]] = PSchema(
None, alias="schema"
) # type: ignore
example: Optional[Any] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
encoding: Optional[Dict[str, Encoding]] = None
class ParameterBase(BaseModel):
description: Optional[str] = None
required: Optional[bool] = None
deprecated: Optional[bool] = None
# Serialization rules for simple scenarios
style: Optional[str] = None
explode: Optional[bool] = None
allowReserved: Optional[bool] = None
schema_: Optional[Union[Schema, Reference]] = PSchema(
None, alias="schema"
) # type: ignore
example: Optional[Any] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
# Serialization rules for more complex scenarios
content: Optional[Dict[str, MediaType]] = None
class Parameter(ParameterBase):
name: str
in_: ParameterInType = PSchema(..., alias="in") # type: ignore
class Header(ParameterBase):
pass
# Workaround OpenAPI recursive reference
class EncodingWithHeaders(Encoding):
headers: Optional[Dict[str, Union[Header, Reference]]] = None
class RequestBody(BaseModel):
description: Optional[str] = None
content: Dict[str, MediaType]
required: Optional[bool] = None
class Link(BaseModel):
operationRef: Optional[str] = None
operationId: Optional[str] = None
parameters: Optional[Dict[str, Union[Any, str]]] = None
requestBody: Optional[Union[Any, str]] = None
description: Optional[str] = None
server: Optional[Server] = None
class Response(BaseModel):
description: str
headers: Optional[Dict[str, Union[Header, Reference]]] = None
content: Optional[Dict[str, MediaType]] = None
links: Optional[Dict[str, Union[Link, Reference]]] = None
class Responses(BaseModel):
default: Response
class Operation(BaseModel):
tags: Optional[List[str]] = None
summary: Optional[str] = None
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
operationId: Optional[str] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
requestBody: Optional[Union[RequestBody, Reference]] = None
responses: Union[Responses, Dict[Union[str], Response]]
# Workaround OpenAPI recursive reference
callbacks: Optional[Dict[str, Union[Dict[str, Any], Reference]]] = None
deprecated: Optional[bool] = None
security: Optional[List[Dict[str, List[str]]]] = None
servers: Optional[List[Server]] = None
class PathItem(BaseModel):
ref: Optional[str] = PSchema(None, alias="$ref") # type: ignore
summary: Optional[str] = None
description: Optional[str] = None
get: Optional[Operation] = None
put: Optional[Operation] = None
post: Optional[Operation] = None
delete: Optional[Operation] = None
options: Optional[Operation] = None
head: Optional[Operation] = None
patch: Optional[Operation] = None
trace: Optional[Operation] = None
servers: Optional[List[Server]] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
# Workaround OpenAPI recursive reference
class OperationWithCallbacks(BaseModel):
callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference]]] = None
class SecuritySchemeType(Enum):
apiKey = "apiKey"
http = "http"
oauth2 = "oauth2"
openIdConnect = "openIdConnect"
class SecurityBase(BaseModel):
type_: SecuritySchemeType = PSchema(..., alias="type") # type: ignore
description: Optional[str] = None
class APIKeyIn(Enum):
query = "query"
header = "header"
cookie = "cookie"
class APIKey(SecurityBase):
type_ = PSchema(SecuritySchemeType.apiKey, alias="type") # type: ignore
in_: APIKeyIn = PSchema(..., alias="in") # type: ignore
name: str
class HTTPBase(SecurityBase):
type_ = PSchema(SecuritySchemeType.http, alias="type") # type: ignore
scheme: str
class HTTPBearer(HTTPBase):
scheme = "bearer"
bearerFormat: Optional[str] = None
class OAuthFlow(BaseModel):
refreshUrl: Optional[str] = None
scopes: Dict[str, str] = {}
class OAuthFlowImplicit(OAuthFlow):
authorizationUrl: str
class OAuthFlowPassword(OAuthFlow):
tokenUrl: str
class OAuthFlowClientCredentials(OAuthFlow):
tokenUrl: str
class OAuthFlowAuthorizationCode(OAuthFlow):
authorizationUrl: str
tokenUrl: str
class OAuthFlows(BaseModel):
implicit: Optional[OAuthFlowImplicit] = None
password: Optional[OAuthFlowPassword] = None
clientCredentials: Optional[OAuthFlowClientCredentials] = None
authorizationCode: Optional[OAuthFlowAuthorizationCode] = None
class OAuth2(SecurityBase):
type_ = PSchema(SecuritySchemeType.oauth2, alias="type") # type: ignore
flows: OAuthFlows
class OpenIdConnect(SecurityBase):
type_ = PSchema(SecuritySchemeType.openIdConnect, alias="type") # type: ignore
openIdConnectUrl: str
SecurityScheme = Union[APIKey, HTTPBase, OAuth2, OpenIdConnect, HTTPBearer]
class Components(BaseModel):
schemas: Optional[Dict[str, Union[Schema, Reference]]] = None
responses: Optional[Dict[str, Union[Response, Reference]]] = None
parameters: Optional[Dict[str, Union[Parameter, Reference]]] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
requestBodies: Optional[Dict[str, Union[RequestBody, Reference]]] = None
headers: Optional[Dict[str, Union[Header, Reference]]] = None
securitySchemes: Optional[Dict[str, Union[SecurityScheme, Reference]]] = None
links: Optional[Dict[str, Union[Link, Reference]]] = None
callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference]]] = None
class Tag(BaseModel):
name: str
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
class OpenAPI(BaseModel):
openapi: str
info: Info
servers: Optional[List[Server]] = None
paths: Dict[str, PathItem]
components: Optional[Components] = None
security: Optional[List[Dict[str, List[str]]]] = None
tags: Optional[List[Tag]] = None
externalDocs: Optional[ExternalDocumentation] = None
import logging
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Schema as PSchema
from pydantic.types import UrlStr
try:
import email_validator
assert email_validator # make autoflake ignore the unused import
from pydantic.types import EmailStr # type: ignore
except ImportError: # pragma: no cover
logging.warning(
"email-validator not installed, email fields will be treated as str.\n"
+ "To install, run: pip install email-validator"
)
class EmailStr(str): # type: ignore
pass
class Contact(BaseModel):
name: Optional[str] = None
url: Optional[UrlStr] = None
email: Optional[EmailStr] = None
class License(BaseModel):
name: str
url: Optional[UrlStr] = None
class Info(BaseModel):
title: str
description: Optional[str] = None
termsOfService: Optional[str] = None
contact: Optional[Contact] = None
license: Optional[License] = None
version: str
class ServerVariable(BaseModel):
enum: Optional[List[str]] = None
default: str
description: Optional[str] = None
class Server(BaseModel):
url: UrlStr
description: Optional[str] = None
variables: Optional[Dict[str, ServerVariable]] = None
class Reference(BaseModel):
ref: str = PSchema(..., alias="$ref") # type: ignore
class Discriminator(BaseModel):
propertyName: str
mapping: Optional[Dict[str, str]] = None
class XML(BaseModel):
name: Optional[str] = None
namespace: Optional[str] = None
prefix: Optional[str] = None
attribute: Optional[bool] = None
wrapped: Optional[bool] = None
class ExternalDocumentation(BaseModel):
description: Optional[str] = None
url: UrlStr
class SchemaBase(BaseModel):
ref: Optional[str] = PSchema(None, alias="$ref") # type: ignore
title: Optional[str] = None
multipleOf: Optional[float] = None
maximum: Optional[float] = None
exclusiveMaximum: Optional[float] = None
minimum: Optional[float] = None
exclusiveMinimum: Optional[float] = None
maxLength: Optional[int] = PSchema(None, gte=0) # type: ignore
minLength: Optional[int] = PSchema(None, gte=0) # type: ignore
pattern: Optional[str] = None
maxItems: Optional[int] = PSchema(None, gte=0) # type: ignore
minItems: Optional[int] = PSchema(None, gte=0) # type: ignore
uniqueItems: Optional[bool] = None
maxProperties: Optional[int] = PSchema(None, gte=0) # type: ignore
minProperties: Optional[int] = PSchema(None, gte=0) # type: ignore
required: Optional[List[str]] = None
enum: Optional[List[str]] = None
type: Optional[str] = None
allOf: Optional[List[Any]] = None
oneOf: Optional[List[Any]] = None
anyOf: Optional[List[Any]] = None
not_: Optional[List[Any]] = PSchema(None, alias="not") # type: ignore
items: Optional[Any] = None
properties: Optional[Dict[str, Any]] = None
additionalProperties: Optional[Union[Dict[str, Any], bool]] = None
description: Optional[str] = None
format: Optional[str] = None
default: Optional[Any] = None
nullable: Optional[bool] = None
discriminator: Optional[Discriminator] = None
readOnly: Optional[bool] = None
writeOnly: Optional[bool] = None
xml: Optional[XML] = None
externalDocs: Optional[ExternalDocumentation] = None
example: Optional[Any] = None
deprecated: Optional[bool] = None
class Schema(SchemaBase):
allOf: Optional[List[SchemaBase]] = None
oneOf: Optional[List[SchemaBase]] = None
anyOf: Optional[List[SchemaBase]] = None
not_: Optional[List[SchemaBase]] = PSchema(None, alias="not") # type: ignore
items: Optional[SchemaBase] = None
properties: Optional[Dict[str, SchemaBase]] = None
additionalProperties: Optional[Union[SchemaBase, bool]] = None
class Example(BaseModel):
summary: Optional[str] = None
description: Optional[str] = None
value: Optional[Any] = None
externalValue: Optional[UrlStr] = None
class ParameterInType(Enum):
query = "query"
header = "header"
path = "path"
cookie = "cookie"
class Encoding(BaseModel):
contentType: Optional[str] = None
# Workaround OpenAPI recursive reference, using Any
headers: Optional[Dict[str, Union[Any, Reference]]] = None
style: Optional[str] = None
explode: Optional[bool] = None
allowReserved: Optional[bool] = None
class MediaType(BaseModel):
schema_: Optional[Union[Schema, Reference]] = PSchema(
None, alias="schema"
) # type: ignore
example: Optional[Any] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
encoding: Optional[Dict[str, Encoding]] = None
class ParameterBase(BaseModel):
description: Optional[str] = None
required: Optional[bool] = None
deprecated: Optional[bool] = None
# Serialization rules for simple scenarios
style: Optional[str] = None
explode: Optional[bool] = None
allowReserved: Optional[bool] = None
schema_: Optional[Union[Schema, Reference]] = PSchema(
None, alias="schema"
) # type: ignore
example: Optional[Any] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
# Serialization rules for more complex scenarios
content: Optional[Dict[str, MediaType]] = None
class Parameter(ParameterBase):
name: str
in_: ParameterInType = PSchema(..., alias="in") # type: ignore
class Header(ParameterBase):
pass
# Workaround OpenAPI recursive reference
class EncodingWithHeaders(Encoding):
headers: Optional[Dict[str, Union[Header, Reference]]] = None
class RequestBody(BaseModel):
description: Optional[str] = None
content: Dict[str, MediaType]
required: Optional[bool] = None
class Link(BaseModel):
operationRef: Optional[str] = None
operationId: Optional[str] = None
parameters: Optional[Dict[str, Union[Any, str]]] = None
requestBody: Optional[Union[Any, str]] = None
description: Optional[str] = None
server: Optional[Server] = None
class Response(BaseModel):
description: str
headers: Optional[Dict[str, Union[Header, Reference]]] = None
content: Optional[Dict[str, MediaType]] = None
links: Optional[Dict[str, Union[Link, Reference]]] = None
class Responses(BaseModel):
default: Response
class Operation(BaseModel):
tags: Optional[List[str]] = None
summary: Optional[str] = None
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
operationId: Optional[str] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
requestBody: Optional[Union[RequestBody, Reference]] = None
responses: Union[Responses, Dict[str, Response]]
# Workaround OpenAPI recursive reference
callbacks: Optional[Dict[str, Union[Dict[str, Any], Reference]]] = None
deprecated: Optional[bool] = None
security: Optional[List[Dict[str, List[str]]]] = None
servers: Optional[List[Server]] = None
class PathItem(BaseModel):
ref: Optional[str] = PSchema(None, alias="$ref") # type: ignore
summary: Optional[str] = None
description: Optional[str] = None
get: Optional[Operation] = None
put: Optional[Operation] = None
post: Optional[Operation] = None
delete: Optional[Operation] = None
options: Optional[Operation] = None
head: Optional[Operation] = None
patch: Optional[Operation] = None
trace: Optional[Operation] = None
servers: Optional[List[Server]] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
# Workaround OpenAPI recursive reference
class OperationWithCallbacks(BaseModel):
callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference]]] = None
class SecuritySchemeType(Enum):
apiKey = "apiKey"
http = "http"
oauth2 = "oauth2"
openIdConnect = "openIdConnect"
class SecurityBase(BaseModel):
type_: SecuritySchemeType = PSchema(..., alias="type") # type: ignore
description: Optional[str] = None
class APIKeyIn(Enum):
query = "query"
header = "header"
cookie = "cookie"
class APIKey(SecurityBase):
type_ = PSchema(SecuritySchemeType.apiKey, alias="type") # type: ignore
in_: APIKeyIn = PSchema(..., alias="in") # type: ignore
name: str
class HTTPBase(SecurityBase):
type_ = PSchema(SecuritySchemeType.http, alias="type") # type: ignore
scheme: str
class HTTPBearer(HTTPBase):
scheme = "bearer"
bearerFormat: Optional[str] = None
class OAuthFlow(BaseModel):
refreshUrl: Optional[str] = None
scopes: Dict[str, str] = {}
class OAuthFlowImplicit(OAuthFlow):
authorizationUrl: str
class OAuthFlowPassword(OAuthFlow):
tokenUrl: str
class OAuthFlowClientCredentials(OAuthFlow):
tokenUrl: str
class OAuthFlowAuthorizationCode(OAuthFlow):
authorizationUrl: str
tokenUrl: str
class OAuthFlows(BaseModel):
implicit: Optional[OAuthFlowImplicit] = None
password: Optional[OAuthFlowPassword] = None
clientCredentials: Optional[OAuthFlowClientCredentials] = None
authorizationCode: Optional[OAuthFlowAuthorizationCode] = None
class OAuth2(SecurityBase):
type_ = PSchema(SecuritySchemeType.oauth2, alias="type") # type: ignore
flows: OAuthFlows
class OpenIdConnect(SecurityBase):
type_ = PSchema(SecuritySchemeType.openIdConnect, alias="type") # type: ignore
openIdConnectUrl: str
SecurityScheme = Union[APIKey, HTTPBase, OAuth2, OpenIdConnect, HTTPBearer]
class Components(BaseModel):
schemas: Optional[Dict[str, Union[Schema, Reference]]] = None
responses: Optional[Dict[str, Union[Response, Reference]]] = None
parameters: Optional[Dict[str, Union[Parameter, Reference]]] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
requestBodies: Optional[Dict[str, Union[RequestBody, Reference]]] = None
headers: Optional[Dict[str, Union[Header, Reference]]] = None
securitySchemes: Optional[Dict[str, Union[SecurityScheme, Reference]]] = None
links: Optional[Dict[str, Union[Link, Reference]]] = None
callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference]]] = None
class Tag(BaseModel):
name: str
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
class OpenAPI(BaseModel):
openapi: str
info: Info
servers: Optional[List[Server]] = None
paths: Dict[str, PathItem]
components: Optional[Components] = None
security: Optional[List[Dict[str, List[str]]]] = None
tags: Optional[List[Tag]] = None
externalDocs: Optional[ExternalDocumentation] = None
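# Illustrative sketch (not part of the original module): roughly the
# smallest document these models will validate.
def _sketch_minimal_openapi() -> OpenAPI:
    return OpenAPI(
        openapi="3.0.2",
        info=Info(title="Demo API", version="0.1.0"),
        paths={
            "/health": PathItem(
                get=Operation(responses={"200": Response(description="OK")})
            )
        },
    )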
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-14-fixed/fastapi/fastapi/openapi/models.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-14-buggy/fastapi/fastapi/openapi/models.py |
fastapi-bug-9 | import asyncio
import inspect
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import get_path_param_names
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.fields import Field, Required, Shape
from pydantic.schema import get_annotation_from_schema
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
sequence_shapes = {
Shape.LIST,
Shape.SET,
Shape.TUPLE,
Shape.SEQUENCE,
Shape.TUPLE_ELLIPS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
Shape.LIST: list,
Shape.SET: set,
Shape.TUPLE: tuple,
Shape.SEQUENCE: list,
Shape.TUPLE_ELLIPS: list,
}
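# Illustrative note (not part of the original module): Shape.TUPLE_ELLIPS
# (sic, no second "I") is the genuine enum member name in pydantic < 1.0;
# pydantic 1.0 replaced the Shape enum with module-level constants and
# renamed this one SHAPE_TUPLE_ELLIPSIS, hence the compatibility shim seen
# in the newer version of this module.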
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
CacheKey = Tuple[Optional[Callable], Tuple[str, ...]]
def get_flat_dependant(
dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None
) -> Dependant:
if visited is None:
visited = []
visited.append(dependant.cache_key)
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
if skip_repeats and sub_dependant.cache_key in visited:
continue
flat_sub = get_flat_dependant(
sub_dependant, skip_repeats=skip_repeats, visited=visited
)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
def is_scalar_field(field: Field) -> bool:
if not (
field.shape == Shape.SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field.schema, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: Field) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = inspect.signature(call)
signature_params = endpoint_signature.parameters
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(param=param, default_schema=params.Query)
if param_name in path_param_names:
assert param.default == param.empty or isinstance(
param.default, params.Path
), "Path params must have no defaults or use Path(...)"
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
param_field = get_param_field(
param=param,
default_schema=params.Path,
force_type=params.ParamTypes.path,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
assert isinstance(
param_field.schema, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
default_schema: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
) -> Field:
default_value = Required
had_schema = False
if not param.default == param.empty:
default_value = param.default
if isinstance(default_value, Schema):
had_schema = True
schema = default_value
default_value = schema.default
if isinstance(schema, params.Param) and getattr(schema, "in_", None) is None:
schema.in_ = default_schema.in_
if force_type:
schema.in_ = force_type # type: ignore
else:
schema = default_schema(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_schema(annotation, schema)
if not schema.alias and getattr(schema, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = schema.alias or param.name
field = Field(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
schema=schema,
)
if not had_schema and not is_scalar_field(field=field):
field.schema = params.Body(schema.default)
return field
def add_param_to_fields(*, field: Field, dependant: Dependant) -> None:
field.schema = cast(params.Param, field.schema)
if field.schema.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field.schema.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field.schema.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field.schema.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isfunction(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response( # type: ignore
content=None, status_code=None, headers=None, media_type=None, background=None
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
sub_values, sub_errors, background_tasks, sub_response, sub_dependency_cache = (
solved_result
)
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
body_values, body_errors = await request_body_to_args( # type: ignore # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
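# Validate path/query/header/cookie params against their fields. Scalar
# sequence fields use getlist() so repeated keys (e.g. ?tag=a&tag=b) are
# collected; a missing required field yields a MissingError located at
# (param_type, alias).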
def request_params_to_args(
required_params: Sequence[Field],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
schema = field.schema
assert isinstance(schema, params.Param), "Params must be subclasses of Param"
if value is None:
if field.required:
errors.append(
ErrorWrapper(
MissingError(),
loc=(schema.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(value, values, loc=(schema.in_.value, field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
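# Validate the already-parsed request body (JSON dict or FormData) against
# the body fields. A single non-embedded body param receives the whole body
# under its alias; empty Form values count as missing; UploadFile values are
# read into bytes when the target type expects bytes.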
async def request_body_to_args(
required_params: List[Field],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
embed = getattr(field.schema, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if field.shape in sequence_shapes and isinstance(
received_body, FormData
):
value = received_body.getlist(field.alias)
else:
value = received_body.get(field.alias)
if (
value is None
or (isinstance(field.schema, params.Form) and value == "")
or (
isinstance(field.schema, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
errors.append(
ErrorWrapper(
MissingError(), loc=("body", field.alias), config=BaseConfig
)
)
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
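# Swap UploadFile-typed fields for bytes (or List[bytes]) so the field can
# be represented in the generated schema.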
def get_schema_compatible_field(*, field: Field) -> Field:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
out_field = Field(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema,
)
return out_field
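# Build the synthetic "body" field for a path operation: a single
# non-embedded body param is used directly; otherwise the params are
# wrapped in a generated BodyModel, with File/Form schemas taking
# precedence over plain Body.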
def get_body_field(*, dependant: Dependant, name: str) -> Optional[Field]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
embed = getattr(first_param.schema, "embed", None)
if len(flat_dependant.body_params) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
    required = any(f.required for f in flat_dependant.body_params)
if any(isinstance(f.schema, params.File) for f in flat_dependant.body_params):
BodySchema: Type[params.Body] = params.File
elif any(isinstance(f.schema, params.Form) for f in flat_dependant.body_params):
BodySchema = params.Form
else:
BodySchema = params.Body
field = Field(
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
schema=BodySchema(None),
)
return field
import asyncio
import inspect
from copy import deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from fastapi import params
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import get_path_param_names
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.fields import Field, Required, Shape
from pydantic.schema import get_annotation_from_schema
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
sequence_shapes = {
Shape.LIST,
Shape.SET,
Shape.TUPLE,
Shape.SEQUENCE,
Shape.TUPLE_ELLIPS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
Shape.LIST: list,
Shape.SET: set,
Shape.TUPLE: tuple,
Shape.SEQUENCE: list,
Shape.TUPLE_ELLIPS: list,
}
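# Build the sub-dependant for a parameter declared with Depends(); when no
# explicit dependency callable is given, the parameter's type annotation is
# used as the dependency.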
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: List[str] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable,
path: str,
name: str = None,
security_scopes: List[str] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
sub_dependant.security_scopes = security_scopes
return sub_dependant
CacheKey = Tuple[Optional[Callable], Tuple[str, ...]]
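# Flatten a dependency tree into a single Dependant whose param lists and
# security requirements aggregate those of every transitive sub-dependency.
# With skip_repeats=True, dependants already visited (by cache_key) are
# flattened only once.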
def get_flat_dependant(
dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None
) -> Dependant:
if visited is None:
visited = []
visited.append(dependant.cache_key)
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
if skip_repeats and sub_dependant.cache_key in visited:
continue
flat_sub = get_flat_dependant(
sub_dependant, skip_repeats=skip_repeats, visited=visited
)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
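# A "scalar" field is a singleton that is neither a Pydantic model, a
# sequence/dict, nor an explicit Body param; any sub-fields (e.g. from a
# Union) must be scalar as well.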
def is_scalar_field(field: Field) -> bool:
if not (
field.shape == Shape.SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not isinstance(field.schema, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: Field) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
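# Inspect the callable's signature and classify every parameter: Depends
# params become sub-dependencies; Request/WebSocket/Response/BackgroundTasks/
# SecurityScopes annotations become special attributes; scalar fields become
# path/query/header/cookie params; anything else must be a request body.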
def get_dependant(
*,
path: str,
call: Callable,
name: str = None,
security_scopes: List[str] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = inspect.signature(call)
signature_params = endpoint_signature.parameters
dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(param=param, default_schema=params.Query)
if param_name in path_param_names:
assert param.default == param.empty or isinstance(
param.default, params.Path
), "Path params must have no defaults or use Path(...)"
assert is_scalar_field(
field=param_field
), f"Path params must be of one of the supported types"
param_field = get_param_field(
param=param,
default_schema=params.Path,
force_type=params.ParamTypes.path,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
assert isinstance(
param_field.schema, params.Body
), f"Param: {param_field.name} can only be a request body, using Body(...)"
dependant.body_params.append(param_field)
return dependant
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
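# Turn a signature parameter into a Pydantic Field. An explicit Schema
# default (Query(...), Path(...), etc.) is used as the field schema,
# otherwise default_schema wraps the plain default value; header aliases
# get underscores converted to hyphens unless disabled, and non-scalar
# fields without an explicit schema are promoted to Body.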
def get_param_field(
*,
param: inspect.Parameter,
default_schema: Type[params.Param] = params.Param,
force_type: params.ParamTypes = None,
) -> Field:
default_value = Required
had_schema = False
if not param.default == param.empty:
default_value = param.default
if isinstance(default_value, Schema):
had_schema = True
schema = default_value
default_value = schema.default
if isinstance(schema, params.Param) and getattr(schema, "in_", None) is None:
schema.in_ = default_schema.in_
if force_type:
schema.in_ = force_type # type: ignore
else:
schema = default_schema(default_value)
required = default_value == Required
annotation: Any = Any
if not param.annotation == param.empty:
annotation = param.annotation
annotation = get_annotation_from_schema(annotation, schema)
if not schema.alias and getattr(schema, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = schema.alias or param.name
field = Field(
name=param.name,
type_=annotation,
default=None if required else default_value,
alias=alias,
required=required,
model_config=BaseConfig,
class_validators={},
schema=schema,
)
if not had_schema and not is_scalar_field(field=field):
field.schema = params.Body(schema.default)
return field
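# File the field into the dependant's param list matching its "in_"
# location (path, query, header, or cookie).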
def add_param_to_fields(*, field: Field, dependant: Dependant) -> None:
field.schema = cast(params.Param, field.schema)
if field.schema.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field.schema.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field.schema.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field.schema.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
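# True if invoking `call` returns a coroutine: either an async function or
# an instance whose __call__ is async. Classes themselves are excluded so
# that instantiation is not treated as a coroutine call.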
def is_coroutine_callable(call: Callable) -> bool:
if inspect.isfunction(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
call = getattr(call, "__call__", None)
return asyncio.iscoroutinefunction(call)
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: BackgroundTasks = None,
response: Response = None,
dependency_overrides_provider: Any = None,
dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable, Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
response = response or Response( # type: ignore
content=None, status_code=None, headers=None, media_type=None, background=None
)
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable, sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable, Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
sub_values, sub_errors, background_tasks, sub_response, sub_dependency_cache = (
solved_result
)
sub_response = cast(Response, sub_response)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
body_values, body_errors = await request_body_to_args( # type: ignore # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
def request_params_to_args(
required_params: Sequence[Field],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
schema = field.schema
assert isinstance(schema, params.Param), "Params must be subclasses of Param"
if value is None:
if field.required:
errors.append(
ErrorWrapper(
MissingError(),
loc=(schema.in_.value, field.alias),
config=BaseConfig,
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(value, values, loc=(schema.in_.value, field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
async def request_body_to_args(
required_params: List[Field],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
embed = getattr(field.schema, "embed", None)
if len(required_params) == 1 and not embed:
received_body = {field.alias: received_body}
for field in required_params:
value: Any = None
if received_body is not None:
if field.shape in sequence_shapes and isinstance(
received_body, FormData
):
value = received_body.getlist(field.alias)
else:
value = received_body.get(field.alias)
if (
value is None
or (isinstance(field.schema, params.Form) and value == "")
or (
isinstance(field.schema, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
errors.append(
ErrorWrapper(
MissingError(), loc=("body", field.alias), config=BaseConfig
)
)
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field.schema, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
awaitables = [sub_value.read() for sub_value in value]
contents = await asyncio.gather(*awaitables)
value = sequence_shape_to_type[field.shape](contents)
v_, errors_ = field.validate(value, values, loc=("body", field.alias))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def get_schema_compatible_field(*, field: Field) -> Field:
out_field = field
if lenient_issubclass(field.type_, UploadFile):
use_type: type = bytes
if field.shape in sequence_shapes:
use_type = List[bytes]
out_field = Field(
name=field.name,
type_=use_type,
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema,
)
return out_field
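# Note: this copy of get_body_field additionally propagates a media_type
# shared by all the Body params into the synthetic body schema; that
# appears to be the difference between the two versions of this file
# stored in this record.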
def get_body_field(*, dependant: Dependant, name: str) -> Optional[Field]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
embed = getattr(first_param.schema, "embed", None)
if len(flat_dependant.body_params) == 1 and not embed:
return get_schema_compatible_field(field=first_param)
model_name = "Body_" + name
BodyModel = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)
    required = any(f.required for f in flat_dependant.body_params)
BodySchema_kwargs: Dict[str, Any] = dict(default=None)
if any(isinstance(f.schema, params.File) for f in flat_dependant.body_params):
BodySchema: Type[params.Body] = params.File
elif any(isinstance(f.schema, params.Form) for f in flat_dependant.body_params):
BodySchema = params.Form
else:
BodySchema = params.Body
body_param_media_types = [
getattr(f.schema, "media_type")
for f in flat_dependant.body_params
if isinstance(f.schema, params.Body)
]
if len(set(body_param_media_types)) == 1:
BodySchema_kwargs["media_type"] = body_param_media_types[0]
field = Field(
name="body",
type_=BodyModel,
default=None,
required=required,
model_config=BaseConfig,
class_validators={},
alias="body",
schema=BodySchema(**BodySchema_kwargs),
)
return field
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-9-fixed/fastapi/fastapi/dependencies/utils.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-9-buggy/fastapi/fastapi/dependencies/utils.py |
fastapi-bug-10 | import asyncio
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.utils import create_cloned_field, generate_operation_id_for_path
from pydantic import BaseConfig, BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
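# Validate the endpoint's raw return value against the response field (if
# any) and encode it to JSON-compatible data; without a field, the value is
# encoded as-is.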
def serialize_response(
*,
field: Field = None,
response: Response,
include: Set[str] = None,
exclude: Set[str] = set(),
by_alias: bool = True,
skip_defaults: bool = False,
) -> Any:
if field:
errors = []
value, errors_ = field.validate(response, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
)
else:
return jsonable_encoder(response)
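# Build the ASGI handler for one path operation: parse the body (form data
# or JSON, turning parse failures into a 400), solve dependencies, call the
# endpoint (directly if it is a coroutine, otherwise in a threadpool), and
# serialize the result, copying headers and status code set on the
# dependency-level response.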
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: Field = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logging.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors)
else:
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = serialize_response(
field=response_field,
response=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
skip_defaults=response_model_skip_defaults,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
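# WebSocket counterpart of get_app: solve dependencies and invoke the
# endpoint; validation errors close the socket with code 1008 and raise.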
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
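# Starlette WebSocketRoute that runs its endpoint through FastAPI's
# dependency-injection machinery.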
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
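# Starlette Route extended with FastAPI features: response model handling
# (including the securely cloned response field), additional responses,
# tags, route-level dependencies, and OpenAPI metadata.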
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
        self.methods = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert lenient_issubclass(
response_class, JSONResponse
), "To declare a type the response must be a JSON response"
response_name = "Response_" + self.unique_id
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
            # By being a new field, no inheritance will be passed on as is. A new
            # model will always be created.
self.secure_cloned_response_field: Optional[Field] = create_cloned_field(
self.response_field
)
else:
self.response_field = None
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert lenient_issubclass(
model, BaseModel
), "A response model must be a Pydantic model"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = Field(
name=response_name,
type_=model,
class_validators=None,
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], Field] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_skip_defaults = response_model_skip_defaults
self.include_in_schema = include_in_schema
self.response_class = response_class
assert inspect.isfunction(endpoint) or inspect.ismethod(
endpoint
), f"An endpoint must be a function or method"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_skip_defaults=self.response_model_skip_defaults,
dependency_overrides_provider=self.dependency_overrides_provider,
)
)
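# Router that registers APIRoute/APIWebSocketRoute entries and exposes the
# HTTP-verb decorators, which are thin wrappers around api_route().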
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
) -> None:
super().__init__(
routes=routes, redirect_slashes=redirect_slashes, default=default
)
self.dependency_overrides_provider = dependency_overrides_provider
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> None:
route = APIRoute(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
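    # Mount another router's routes under an optional prefix, merging tags,
    # dependencies, and responses into each re-registered route.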
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_skip_defaults=route.response_model_skip_defaults,
include_in_schema=route.include_in_schema,
response_class=route.response_class,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
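    # The verb helpers below (get/put/post/delete/options/head/patch/trace)
    # all delegate to api_route() with the corresponding single method.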
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
import asyncio
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.utils import create_cloned_field, generate_operation_id_for_path
from pydantic import BaseConfig, BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
def serialize_response(
*,
field: Field = None,
response: Response,
include: Set[str] = None,
exclude: Set[str] = set(),
by_alias: bool = True,
skip_defaults: bool = False,
) -> Any:
if field:
errors = []
value, errors_ = field.validate(response, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors)
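        # With skip_defaults, re-serialize from the original model rather than
        # the validated value, so fields left unset on the response are
        # actually skipped.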
if skip_defaults and isinstance(response, BaseModel):
value = response.dict(skip_defaults=skip_defaults)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
)
else:
return jsonable_encoder(response)
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: Field = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logging.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors)
else:
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = serialize_response(
field=response_field,
response=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
skip_defaults=response_model_skip_defaults,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
        self.methods = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert lenient_issubclass(
response_class, JSONResponse
), "To declare a type the response must be a JSON response"
response_name = "Response_" + self.unique_id
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
            # By being a new field, no inheritance will be passed on as is. A new
            # model will always be created.
self.secure_cloned_response_field: Optional[Field] = create_cloned_field(
self.response_field
)
else:
self.response_field = None
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert lenient_issubclass(
model, BaseModel
), "A response model must be a Pydantic model"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = Field(
name=response_name,
type_=model,
class_validators=None,
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], Field] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_skip_defaults = response_model_skip_defaults
self.include_in_schema = include_in_schema
self.response_class = response_class
assert inspect.isfunction(endpoint) or inspect.ismethod(
endpoint
), f"An endpoint must be a function or method"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_skip_defaults=self.response_model_skip_defaults,
dependency_overrides_provider=self.dependency_overrides_provider,
)
)
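
def _example_secure_cloned_response_field() -> None:  # pragma: no cover
    # Minimal usage sketch of what the secure cloned response field guards
    # against. FastAPI, the models, and the endpoint here are illustrative
    # only and are not used by this module.
    from fastapi import FastAPI
    from pydantic import BaseModel

    class User(BaseModel):
        username: str

    class UserInDB(User):
        hashed_password: str

    app = FastAPI()

    @app.get("/user", response_model=User)
    def read_user() -> User:
        # A UserInDB instance is also a User, so it would pass plain
        # isinstance validation. The cloned field re-validates it into a
        # fresh User model, so hashed_password never reaches the client.
        return UserInDB(username="alice", hashed_password="secret")
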
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
) -> None:
super().__init__(
routes=routes, redirect_slashes=redirect_slashes, default=default
)
self.dependency_overrides_provider = dependency_overrides_provider
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> None:
route = APIRoute(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_skip_defaults=route.response_model_skip_defaults,
include_in_schema=route.include_in_schema,
response_class=route.response_class,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
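    # Usage sketch for include_router (the routers below are hypothetical):
    #
    #     users = APIRouter()
    #
    #     @users.get("/users/{user_id}")
    #     def read_user(user_id: int): ...
    #
    #     api = APIRouter()
    #     api.include_router(users, prefix="/v1", tags=["users"])
    #
    # Each APIRoute from `users` is re-registered on `api` under the "/v1"
    # prefix with merged tags, combined dependencies, and merged responses.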
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Set[str] = None,
response_model_exclude: Set[str] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
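
def _example_api_route_multiple_methods() -> None:  # pragma: no cover
    # Sketch showing that the verb helpers above are thin wrappers over
    # api_route with a fixed one-method list, while api_route itself can
    # register several methods at once. The route below is hypothetical.
    router = APIRouter()

    @router.api_route("/ping", methods=["GET", "HEAD"])
    def ping() -> dict:
        return {"ok": True}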
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-10-fixed/fastapi/fastapi/routing.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-10-buggy/fastapi/fastapi/routing.py |
fastapi-bug-1 | from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union
from fastapi import routing
from fastapi.concurrency import AsyncExitStack
from fastapi.encoders import DictIntStrAny, SetIntStr
from fastapi.exception_handlers import (
http_exception_handler,
request_validation_exception_handler,
)
from fastapi.exceptions import RequestValidationError
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.openapi.utils import get_openapi
from fastapi.params import Depends
from fastapi.utils import warning_response_model_skip_defaults_deprecated
from starlette.applications import Starlette
from starlette.datastructures import State
from starlette.exceptions import HTTPException
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import HTMLResponse, JSONResponse, Response
from starlette.routing import BaseRoute
from starlette.types import Receive, Scope, Send
class FastAPI(Starlette):
def __init__(
self,
*,
debug: bool = False,
routes: List[BaseRoute] = None,
title: str = "FastAPI",
description: str = "",
version: str = "0.1.0",
openapi_url: Optional[str] = "/openapi.json",
openapi_prefix: str = "",
default_response_class: Type[Response] = JSONResponse,
docs_url: Optional[str] = "/docs",
redoc_url: Optional[str] = "/redoc",
swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect",
swagger_ui_init_oauth: Optional[dict] = None,
middleware: Sequence[Middleware] = None,
exception_handlers: Dict[Union[int, Type[Exception]], Callable] = None,
on_startup: Sequence[Callable] = None,
on_shutdown: Sequence[Callable] = None,
**extra: Dict[str, Any],
) -> None:
self.default_response_class = default_response_class
self._debug = debug
self.state = State()
self.router: routing.APIRouter = routing.APIRouter(
routes,
dependency_overrides_provider=self,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
self.exception_handlers = (
{} if exception_handlers is None else dict(exception_handlers)
)
self.user_middleware = [] if middleware is None else list(middleware)
self.middleware_stack = self.build_middleware_stack()
self.title = title
self.description = description
self.version = version
self.openapi_url = openapi_url
self.openapi_prefix = openapi_prefix.rstrip("/")
self.docs_url = docs_url
self.redoc_url = redoc_url
self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url
self.swagger_ui_init_oauth = swagger_ui_init_oauth
self.extra = extra
self.dependency_overrides: Dict[Callable, Callable] = {}
self.openapi_version = "3.0.2"
if self.openapi_url:
assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'"
assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'"
if self.docs_url or self.redoc_url:
assert self.openapi_url, "The openapi_url is required for the docs"
self.openapi_schema: Optional[Dict[str, Any]] = None
self.setup()
def openapi(self) -> Dict:
if not self.openapi_schema:
self.openapi_schema = get_openapi(
title=self.title,
version=self.version,
openapi_version=self.openapi_version,
description=self.description,
routes=self.routes,
openapi_prefix=self.openapi_prefix,
)
return self.openapi_schema
def setup(self) -> None:
if self.openapi_url:
async def openapi(req: Request) -> JSONResponse:
return JSONResponse(self.openapi())
self.add_route(self.openapi_url, openapi, include_in_schema=False)
openapi_url = self.openapi_prefix + self.openapi_url
if self.openapi_url and self.docs_url:
async def swagger_ui_html(req: Request) -> HTMLResponse:
return get_swagger_ui_html(
openapi_url=openapi_url,
title=self.title + " - Swagger UI",
oauth2_redirect_url=self.swagger_ui_oauth2_redirect_url,
init_oauth=self.swagger_ui_init_oauth,
)
self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False)
if self.swagger_ui_oauth2_redirect_url:
async def swagger_ui_redirect(req: Request) -> HTMLResponse:
return get_swagger_ui_oauth2_redirect_html()
self.add_route(
self.swagger_ui_oauth2_redirect_url,
swagger_ui_redirect,
include_in_schema=False,
)
if self.openapi_url and self.redoc_url:
async def redoc_html(req: Request) -> HTMLResponse:
return get_redoc_html(
openapi_url=openapi_url, title=self.title + " - ReDoc"
)
self.add_route(self.redoc_url, redoc_html, include_in_schema=False)
self.add_exception_handler(HTTPException, http_exception_handler)
self.add_exception_handler(
RequestValidationError, request_validation_exception_handler
)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if AsyncExitStack:
async with AsyncExitStack() as stack:
scope["fastapi_astack"] = stack
await super().__call__(scope, receive, send)
else:
await super().__call__(scope, receive, send) # pragma: no cover
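    # The per-request AsyncExitStack stored as scope["fastapi_astack"] is
    # what lets dependencies declared with `yield` run their cleanup code
    # once the request has been handled.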
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> None:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
self.router.add_api_route(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
def decorator(func: Callable) -> Callable:
self.router.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
)
return func
return decorator
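    # Both add_api_route and api_route coerce the deprecated
    # response_model_skip_defaults flag into response_model_exclude_unset
    # via bool(...), so either spelling reaches the router as the same
    # keyword; new code should pass response_model_exclude_unset directly.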
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
self.router.add_api_websocket_route(path, endpoint, name=name)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: routing.APIRouter,
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
self.router.include_router(
router,
prefix=prefix,
tags=tags,
dependencies=dependencies,
responses=responses or {},
default_response_class=default_response_class
or self.default_response_class,
)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.get(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.put(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.post(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.delete(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
operation_id=operation_id,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.options(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.head(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.patch(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.trace(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union
from fastapi import routing
from fastapi.concurrency import AsyncExitStack
from fastapi.encoders import DictIntStrAny, SetIntStr
from fastapi.exception_handlers import (
http_exception_handler,
request_validation_exception_handler,
)
from fastapi.exceptions import RequestValidationError
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.openapi.utils import get_openapi
from fastapi.params import Depends
from fastapi.utils import warning_response_model_skip_defaults_deprecated
from starlette.applications import Starlette
from starlette.datastructures import State
from starlette.exceptions import HTTPException
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import HTMLResponse, JSONResponse, Response
from starlette.routing import BaseRoute
from starlette.types import Receive, Scope, Send
class FastAPI(Starlette):
def __init__(
self,
*,
debug: bool = False,
routes: List[BaseRoute] = None,
title: str = "FastAPI",
description: str = "",
version: str = "0.1.0",
openapi_url: Optional[str] = "/openapi.json",
openapi_prefix: str = "",
default_response_class: Type[Response] = JSONResponse,
docs_url: Optional[str] = "/docs",
redoc_url: Optional[str] = "/redoc",
swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect",
swagger_ui_init_oauth: Optional[dict] = None,
middleware: Sequence[Middleware] = None,
exception_handlers: Dict[Union[int, Type[Exception]], Callable] = None,
on_startup: Sequence[Callable] = None,
on_shutdown: Sequence[Callable] = None,
**extra: Dict[str, Any],
) -> None:
self.default_response_class = default_response_class
self._debug = debug
self.state = State()
self.router: routing.APIRouter = routing.APIRouter(
routes,
dependency_overrides_provider=self,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
self.exception_handlers = (
{} if exception_handlers is None else dict(exception_handlers)
)
self.user_middleware = [] if middleware is None else list(middleware)
self.middleware_stack = self.build_middleware_stack()
self.title = title
self.description = description
self.version = version
self.openapi_url = openapi_url
self.openapi_prefix = openapi_prefix.rstrip("/")
self.docs_url = docs_url
self.redoc_url = redoc_url
self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url
self.swagger_ui_init_oauth = swagger_ui_init_oauth
self.extra = extra
self.dependency_overrides: Dict[Callable, Callable] = {}
self.openapi_version = "3.0.2"
if self.openapi_url:
assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'"
assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'"
if self.docs_url or self.redoc_url:
assert self.openapi_url, "The openapi_url is required for the docs"
self.openapi_schema: Optional[Dict[str, Any]] = None
self.setup()
def openapi(self) -> Dict:
if not self.openapi_schema:
self.openapi_schema = get_openapi(
title=self.title,
version=self.version,
openapi_version=self.openapi_version,
description=self.description,
routes=self.routes,
openapi_prefix=self.openapi_prefix,
)
return self.openapi_schema
def setup(self) -> None:
if self.openapi_url:
async def openapi(req: Request) -> JSONResponse:
return JSONResponse(self.openapi())
self.add_route(self.openapi_url, openapi, include_in_schema=False)
openapi_url = self.openapi_prefix + self.openapi_url
if self.openapi_url and self.docs_url:
async def swagger_ui_html(req: Request) -> HTMLResponse:
return get_swagger_ui_html(
openapi_url=openapi_url,
title=self.title + " - Swagger UI",
oauth2_redirect_url=self.swagger_ui_oauth2_redirect_url,
init_oauth=self.swagger_ui_init_oauth,
)
self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False)
if self.swagger_ui_oauth2_redirect_url:
async def swagger_ui_redirect(req: Request) -> HTMLResponse:
return get_swagger_ui_oauth2_redirect_html()
self.add_route(
self.swagger_ui_oauth2_redirect_url,
swagger_ui_redirect,
include_in_schema=False,
)
if self.openapi_url and self.redoc_url:
async def redoc_html(req: Request) -> HTMLResponse:
return get_redoc_html(
openapi_url=openapi_url, title=self.title + " - ReDoc"
)
self.add_route(self.redoc_url, redoc_html, include_in_schema=False)
self.add_exception_handler(HTTPException, http_exception_handler)
self.add_exception_handler(
RequestValidationError, request_validation_exception_handler
)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if AsyncExitStack:
async with AsyncExitStack() as stack:
scope["fastapi_astack"] = stack
await super().__call__(scope, receive, send)
else:
await super().__call__(scope, receive, send) # pragma: no cover
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> None:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
self.router.add_api_route(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
def decorator(func: Callable) -> Callable:
self.router.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
)
return func
return decorator
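    # Usage sketch for the response-model exclusion flags (endpoint and
    # Item model are hypothetical):
    #
    #     @app.get("/items/{item_id}", response_model=Item,
    #              response_model_exclude_unset=True)
    #     def read_item(item_id: str): ...
    #
    # exclude_unset drops fields the endpoint never set explicitly,
    # exclude_defaults drops fields equal to their declared defaults, and
    # exclude_none drops fields whose value is None, mirroring the
    # corresponding Pydantic .dict() options.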
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
self.router.add_api_websocket_route(path, endpoint, name=name)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: routing.APIRouter,
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
self.router.include_router(
router,
prefix=prefix,
tags=tags,
dependencies=dependencies,
responses=responses or {},
default_response_class=default_response_class
or self.default_response_class,
)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.get(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.put(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.post(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.delete(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
operation_id=operation_id,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.options(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.head(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.patch(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[routing.APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.router.trace(
path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-1-fixed/fastapi/fastapi/applications.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-1-buggy/fastapi/fastapi/applications.py |
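The options/head/patch/trace decorators above are thin wrappers: each warns when the deprecated response_model_skip_defaults flag is passed, folds it into response_model_exclude_unset via bool(response_model_exclude_unset or response_model_skip_defaults), and delegates to the matching self.router method. A minimal sketch of the exclude-unset behavior, using a hypothetical Item model and endpoint:

from typing import Optional

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class Item(BaseModel):
    name: str
    description: Optional[str] = None  # left unset by the endpoint below

@app.patch(
    "/items/{item_id}", response_model=Item, response_model_exclude_unset=True
)
def update_item(item_id: int) -> Item:
    # `description` is never set here, so exclude_unset=True drops it from
    # the serialized response body.
    return Item(name=f"item-{item_id}")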
fastapi-bug-7 | from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
headers = getattr(exc, "headers", None)
if headers:
return JSONResponse(
{"detail": exc.detail}, status_code=exc.status_code, headers=headers
)
else:
return JSONResponse({"detail": exc.detail}, status_code=exc.status_code)
async def request_validation_exception_handler(
request: Request, exc: RequestValidationError
) -> JSONResponse:
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY, content={"detail": exc.errors()}
)
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
headers = getattr(exc, "headers", None)
if headers:
return JSONResponse(
{"detail": exc.detail}, status_code=exc.status_code, headers=headers
)
else:
return JSONResponse({"detail": exc.detail}, status_code=exc.status_code)
async def request_validation_exception_handler(
request: Request, exc: RequestValidationError
) -> JSONResponse:
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
content={"detail": jsonable_encoder(exc.errors())},
)
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-7-fixed/fastapi/fastapi/exception_handlers.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-7-buggy/fastapi/fastapi/exception_handlers.py |
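The two copies above differ in a single expression: the first puts exc.errors() into the 422 body as-is, while the second passes it through jsonable_encoder first. Validation errors can carry values the stdlib JSON encoder rejects, for example a Decimal limit in an error's ctx, which is what the encoding step guards against. A minimal sketch, assuming a hypothetical Order model:

from decimal import Decimal

from fastapi import FastAPI
from pydantic import BaseModel, condecimal

app = FastAPI()

class Order(BaseModel):
    total: condecimal(gt=Decimal("0"))  # a violation puts a Decimal in ctx

@app.post("/orders")
def create_order(order: Order) -> dict:
    return {"total": str(order.total)}

# POSTing {"total": -1} raises RequestValidationError; the handler that runs
# exc.errors() through jsonable_encoder can render the 422 body, while the
# plain-dict version fails when JSONResponse meets the Decimal in ctx.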
fastapi-bug-13 | import asyncio
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_body_field, get_dependant, solve_dependencies
from fastapi.encoders import jsonable_encoder
from fastapi.utils import UnconstrainedConfig
from pydantic import BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import compile_path, get_name, request_response
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
def serialize_response(*, field: Field = None, response: Response) -> Any:
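    # Encode the raw endpoint return value; when a response field is declared,
    # validate the encoded value against it and encode the validated result,
    # otherwise return the plain encoding.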
encoded = jsonable_encoder(response)
if field:
errors = []
value, errors_ = field.validate(encoded, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors)
return jsonable_encoder(value)
else:
return encoded
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
content_type: Type[Response] = JSONResponse,
response_field: Field = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must me a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
raw_body = await request.form()
form_fields = {}
for field, value in raw_body.items():
form_fields[field] = value
if form_fields:
body = form_fields
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logging.error("Error getting request body", e)
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
)
values, errors, background_tasks = await solve_dependencies(
request=request, dependant=dependant, body=body
)
if errors:
errors_out = ValidationError(errors)
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail=errors_out.errors()
)
else:
assert dependant.call is not None, "dependant.call must me a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = serialize_response(
field=response_field, response=raw_response
)
return content_type(
content=response_data,
status_code=status_code,
background=background_tasks,
)
return app
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
) -> None:
assert path.startswith("/"), "Routed paths must always start with '/'"
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.response_model = response_model
if self.response_model:
assert lenient_issubclass(
content_type, JSONResponse
), "To declare a type the response must be a JSON response"
response_name = "Response_" + self.name
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators=[],
default=None,
required=False,
model_config=UnconstrainedConfig,
schema=Schema(None),
)
else:
self.response_field = None
self.status_code = status_code
self.tags = tags or []
self.summary = summary
self.description = description or self.endpoint.__doc__
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert lenient_issubclass(
model, BaseModel
), "A response model must be a Pydantic model"
response_name = f"Response_{additional_status_code}_{self.name}"
response_field = Field(
name=response_name,
type_=model,
class_validators=None,
default=None,
required=False,
model_config=UnconstrainedConfig,
schema=Schema(None),
)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], Field] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
if methods is None:
methods = ["GET"]
self.methods = methods
self.operation_id = operation_id
self.include_in_schema = include_in_schema
self.content_type = content_type
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
assert inspect.isfunction(endpoint) or inspect.ismethod(
endpoint
), f"An endpoint must be a function or method"
self.dependant = get_dependant(path=path, call=self.endpoint)
self.body_field = get_body_field(dependant=self.dependant, name=self.name)
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
content_type=self.content_type,
response_field=self.response_field,
)
)
class APIRouter(routing.Router):
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> None:
route = APIRoute(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
for route in router.routes:
if isinstance(route, APIRoute):
if responses is None:
responses = {}
responses = {**responses, **route.responses}
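                # `responses` itself is rebound here, so responses merged in
                # for this route stay in `responses` for every later route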
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
include_in_schema=route.include_in_schema,
content_type=route.content_type,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=route.methods,
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
def get(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
import asyncio
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_body_field, get_dependant, solve_dependencies
from fastapi.encoders import jsonable_encoder
from fastapi.utils import UnconstrainedConfig
from pydantic import BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import compile_path, get_name, request_response
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
def serialize_response(*, field: Field = None, response: Response) -> Any:
encoded = jsonable_encoder(response)
if field:
errors = []
value, errors_ = field.validate(encoded, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors)
return jsonable_encoder(value)
else:
return encoded
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
content_type: Type[Response] = JSONResponse,
response_field: Field = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must me a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
raw_body = await request.form()
form_fields = {}
for field, value in raw_body.items():
form_fields[field] = value
if form_fields:
body = form_fields
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logging.error("Error getting request body", e)
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
)
values, errors, background_tasks = await solve_dependencies(
request=request, dependant=dependant, body=body
)
if errors:
errors_out = ValidationError(errors)
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail=errors_out.errors()
)
else:
assert dependant.call is not None, "dependant.call must me a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = serialize_response(
field=response_field, response=raw_response
)
return content_type(
content=response_data,
status_code=status_code,
background=background_tasks,
)
return app
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
) -> None:
assert path.startswith("/"), "Routed paths must always start with '/'"
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.response_model = response_model
if self.response_model:
assert lenient_issubclass(
content_type, JSONResponse
), "To declare a type the response must be a JSON response"
response_name = "Response_" + self.name
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators=[],
default=None,
required=False,
model_config=UnconstrainedConfig,
schema=Schema(None),
)
else:
self.response_field = None
self.status_code = status_code
self.tags = tags or []
self.summary = summary
self.description = description or self.endpoint.__doc__
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert lenient_issubclass(
model, BaseModel
), "A response model must be a Pydantic model"
response_name = f"Response_{additional_status_code}_{self.name}"
response_field = Field(
name=response_name,
type_=model,
class_validators=None,
default=None,
required=False,
model_config=UnconstrainedConfig,
schema=Schema(None),
)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], Field] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
if methods is None:
methods = ["GET"]
self.methods = methods
self.operation_id = operation_id
self.include_in_schema = include_in_schema
self.content_type = content_type
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
assert inspect.isfunction(endpoint) or inspect.ismethod(
endpoint
), f"An endpoint must be a function or method"
self.dependant = get_dependant(path=path, call=self.endpoint)
self.body_field = get_body_field(dependant=self.dependant, name=self.name)
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
content_type=self.content_type,
response_field=self.response_field,
)
)
class APIRouter(routing.Router):
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> None:
route = APIRoute(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
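                # merged per route; the shared `responses` dict stays untouched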
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
include_in_schema=route.include_in_schema,
content_type=route.content_type,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=route.methods,
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
def get(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[BaseModel] = None,
status_code: int = 200,
tags: List[str] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
include_in_schema: bool = True,
content_type: Type[Response] = JSONResponse,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
include_in_schema=include_in_schema,
content_type=content_type,
name=name,
)
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-13-fixed/fastapi/fastapi/routing.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-13-buggy/fastapi/fastapi/routing.py |
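The substantive difference between the two routing.py copies above is in include_router: the first rebinds responses inside the route loop (responses = {**responses, **route.responses}), so additional responses declared for one route accumulate and leak into every route processed after it, while the second hoists the None check out of the loop and builds a fresh per-route combined_responses. A minimal sketch of the observable difference, assuming two hypothetical routes:

from fastapi import APIRouter

router = APIRouter()

@router.get("/a", responses={501: {"description": "Error 1"}})
def read_a() -> dict:
    return {"msg": "a"}

@router.get("/b")
def read_b() -> dict:
    return {"msg": "b"}

parent = APIRouter()
parent.include_router(router, responses={401: {"description": "Unauthorized"}})
# First copy: /b's OpenAPI responses also inherit 501 from /a.
# Second copy: /b gets only 401 plus its own defaults.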
fastapi-bug-2 | import asyncio
import inspect
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.logger import logger
from fastapi.openapi.constants import STATUS_CODES_WITH_NO_BODY
from fastapi.utils import (
PYDANTIC_1,
create_cloned_field,
create_response_field,
generate_operation_id_for_path,
get_field_info,
warning_response_model_skip_defaults_deprecated,
)
from pydantic import BaseModel
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount # noqa
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
try:
from pydantic.fields import FieldInfo, ModelField
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.fields import Field as ModelField # type: ignore
def _prepare_response_content(
res: Any, *, by_alias: bool = True, exclude_unset: bool
) -> Any:
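    # Recursively unwrap models, lists, and dicts into plain data before
    # validation; note the recursive calls below fall back to the default
    # by_alias=True rather than forwarding the caller's value.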
if isinstance(res, BaseModel):
if PYDANTIC_1:
return res.dict(by_alias=by_alias, exclude_unset=exclude_unset)
else:
return res.dict(
by_alias=by_alias, skip_defaults=exclude_unset
) # pragma: nocover
elif isinstance(res, list):
return [
_prepare_response_content(item, exclude_unset=exclude_unset) for item in res
]
elif isinstance(res, dict):
return {
k: _prepare_response_content(v, exclude_unset=exclude_unset)
for k, v in res.items()
}
return res
async def serialize_response(
*,
field: ModelField = None,
response_content: Any,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude: Union[SetIntStr, DictIntStrAny] = set(),
by_alias: bool = True,
exclude_unset: bool = False,
is_coroutine: bool = True,
) -> Any:
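    # Validate the response content against the declared field (in a
    # threadpool when the endpoint is synchronous) and JSON-encode it with
    # the include/exclude/alias policy; with no field, just encode as-is.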
if field:
errors = []
response_content = _prepare_response_content(
response_content, by_alias=by_alias, exclude_unset=exclude_unset
)
if is_coroutine:
value, errors_ = field.validate(response_content, {}, loc=("response",))
else:
value, errors_ = await run_in_threadpool(
field.validate, response_content, {}, loc=("response",)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
)
else:
return jsonable_encoder(response_content)
async def run_endpoint_function(
*, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool
) -> Any:
# Only called by get_request_handler. Has been split into its own function to
# facilitate profiling endpoints, since inner functions are harder to profile.
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
return await dependant.call(**values)
else:
return await run_in_threadpool(dependant.call, **values)
def get_request_handler(
dependant: Dependant,
body_field: ModelField = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: ModelField = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(get_field_info(body_field), params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logger.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors, body=body)
else:
raw_response = await run_endpoint_function(
dependant=dependant, values=values, is_coroutine=is_coroutine
)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = await serialize_response(
field=response_field,
response_content=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
exclude_unset=response_model_exclude_unset,
is_coroutine=is_coroutine,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Optional[Type[Response]] = None,
dependency_overrides_provider: Any = None,
callbacks: Optional[List["APIRoute"]] = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
self.methods = set([method.upper() for method in methods])
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert (
status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {status_code} must not have a response body"
response_name = "Response_" + self.unique_id
self.response_field = create_response_field(
name=response_name, type_=self.response_model
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
            # Because the clone is a brand-new field, nothing is passed
            # through via inheritance; a new model is always created.
self.secure_cloned_response_field: Optional[
ModelField
] = create_cloned_field(self.response_field)
else:
self.response_field = None # type: ignore
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert (
additional_status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {additional_status_code} must not have a response body"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = create_response_field(name=response_name, type_=model)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], ModelField] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_exclude_unset = response_model_exclude_unset
self.include_in_schema = include_in_schema
self.response_class = response_class
assert callable(endpoint), f"An endpoint must be a callable"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.callbacks = callbacks
self.app = request_response(self.get_route_handler())
def get_route_handler(self) -> Callable:
return get_request_handler(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class or JSONResponse,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_exclude_unset=self.response_model_exclude_unset,
dependency_overrides_provider=self.dependency_overrides_provider,
)
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
route_class: Type[APIRoute] = APIRoute,
default_response_class: Type[Response] = None,
on_startup: Sequence[Callable] = None,
on_shutdown: Sequence[Callable] = None,
) -> None:
super().__init__(
routes=routes,
redirect_slashes=redirect_slashes,
default=default,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
self.default_response_class = default_response_class
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
route_class_override: Optional[Type[APIRoute]] = None,
callbacks: List[APIRoute] = None,
) -> None:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
route_class = route_class_override or self.route_class
route = route_class(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
callbacks=callbacks,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_exclude_unset=route.response_model_exclude_unset,
include_in_schema=route.include_in_schema,
response_class=route.response_class or default_response_class,
name=route.name,
route_class_override=type(route),
callbacks=route.callbacks,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
for handler in router.on_startup:
self.add_event_handler("startup", handler)
for handler in router.on_shutdown:
self.add_event_handler("shutdown", handler)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
import asyncio
import inspect
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.logger import logger
from fastapi.openapi.constants import STATUS_CODES_WITH_NO_BODY
from fastapi.utils import (
PYDANTIC_1,
create_cloned_field,
create_response_field,
generate_operation_id_for_path,
get_field_info,
warning_response_model_skip_defaults_deprecated,
)
from pydantic import BaseModel
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount # noqa
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
try:
from pydantic.fields import FieldInfo, ModelField
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic import Schema as FieldInfo # type: ignore
from pydantic.fields import Field as ModelField # type: ignore
def _prepare_response_content(
res: Any, *, by_alias: bool = True, exclude_unset: bool
) -> Any:
if isinstance(res, BaseModel):
if PYDANTIC_1:
return res.dict(by_alias=by_alias, exclude_unset=exclude_unset)
else:
return res.dict(
by_alias=by_alias, skip_defaults=exclude_unset
) # pragma: nocover
elif isinstance(res, list):
return [
_prepare_response_content(item, exclude_unset=exclude_unset) for item in res
]
elif isinstance(res, dict):
return {
k: _prepare_response_content(v, exclude_unset=exclude_unset)
for k, v in res.items()
}
return res
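# A minimal sketch of what _prepare_response_content does (hypothetical
# model and data, assuming Pydantic v1 as imported above):
#
#     class Item(BaseModel):
#         name: str
#         price: float = 0.0
#
#     _prepare_response_content({"items": [Item(name="a")]}, exclude_unset=True)
#     # -> {"items": [{"name": "a"}]}   (unset "price" is dropped)
#
# Note: the recursive calls for list and dict items above forward only
# exclude_unset, so nested models fall back to the default by_alias=True.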
async def serialize_response(
*,
field: ModelField = None,
response_content: Any,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude: Union[SetIntStr, DictIntStrAny] = set(),
by_alias: bool = True,
exclude_unset: bool = False,
is_coroutine: bool = True,
) -> Any:
if field:
errors = []
response_content = _prepare_response_content(
response_content, by_alias=by_alias, exclude_unset=exclude_unset
)
if is_coroutine:
value, errors_ = field.validate(response_content, {}, loc=("response",))
else:
value, errors_ = await run_in_threadpool(
field.validate, response_content, {}, loc=("response",)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
)
else:
return jsonable_encoder(response_content)
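# serialize_response validates the endpoint's return value against the
# (cloned) response field and then encodes it to JSON-compatible data.
# Validation runs inline for coroutine endpoints and in a threadpool for
# sync ones, mirroring how the endpoint itself was executed; a failure
# raises pydantic's ValidationError for the *response*, which surfaces as a
# server error rather than a 422.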
async def run_endpoint_function(
*, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool
) -> Any:
# Only called by get_request_handler. Has been split into its own function to
# facilitate profiling endpoints, since inner functions are harder to profile.
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
return await dependant.call(**values)
else:
return await run_in_threadpool(dependant.call, **values)
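# Dispatch is decided once per route from the endpoint's type; a sketch with
# hypothetical endpoints:
#
#     async def read_async():    # awaited directly on the event loop
#         return {"kind": "async"}
#
#     def read_sync():            # executed via run_in_threadpool
#         return {"kind": "sync"}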
def get_request_handler(
dependant: Dependant,
body_field: ModelField = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: ModelField = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(get_field_info(body_field), params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logger.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors, body=body)
else:
raw_response = await run_endpoint_function(
dependant=dependant, values=values, is_coroutine=is_coroutine
)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = await serialize_response(
field=response_field,
response_content=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
exclude_unset=response_model_exclude_unset,
is_coroutine=is_coroutine,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
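# Request flow implemented by the handler above, in order:
#   1. parse the body (form data or JSON, chosen from the body field's
#      FieldInfo);
#   2. solve dependencies; any errors become a RequestValidationError
#      (rendered as a 422 by the default exception handler);
#   3. run the endpoint; a raw Response is returned as-is (background tasks
#      attached if it has none), otherwise the value is serialized against
#      the response field;
#   4. headers and status code set on the injected sub-response are merged
#      into the final response.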
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
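# WebSocket dependency errors cannot be reported as an HTTP 422, so the
# socket is closed with code 1008 (policy violation) before the validation
# error is raised.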
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
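# A minimal usage sketch for websocket routes (hypothetical endpoint; routes
# are normally registered through APIRouter.websocket defined below):
#
#     router = APIRouter()
#
#     @router.websocket("/ws")
#     async def ws_endpoint(websocket: WebSocket):
#         await websocket.accept()
#         await websocket.send_text("hello")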
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Optional[Type[Response]] = None,
dependency_overrides_provider: Any = None,
callbacks: Optional[List["APIRoute"]] = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
        self.methods = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert (
status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {status_code} must not have a response body"
response_name = "Response_" + self.unique_id
self.response_field = create_response_field(
name=response_name, type_=self.response_model
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
# By being a new field, no inheritance will be passed as is. A new model
# will be always created.
self.secure_cloned_response_field: Optional[
ModelField
] = create_cloned_field(self.response_field)
else:
self.response_field = None # type: ignore
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert (
additional_status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {additional_status_code} must not have a response body"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = create_response_field(name=response_name, type_=model)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], ModelField] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_exclude_unset = response_model_exclude_unset
self.include_in_schema = include_in_schema
self.response_class = response_class
        assert callable(endpoint), "An endpoint must be a callable"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.callbacks = callbacks
self.app = request_response(self.get_route_handler())
def get_route_handler(self) -> Callable:
return get_request_handler(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class or JSONResponse,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_exclude_unset=self.response_model_exclude_unset,
dependency_overrides_provider=self.dependency_overrides_provider,
)
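# A custom route class can hook request handling by overriding
# get_route_handler(); a sketch (hypothetical subclass, wrapping the
# original handler unchanged):
#
#     class HeaderTaggingRoute(APIRoute):
#         def get_route_handler(self) -> Callable:
#             original = super().get_route_handler()
#
#             async def handler(request: Request) -> Response:
#                 response = await original(request)
#                 response.headers["X-Handled-By"] = "HeaderTaggingRoute"
#                 return response
#
#             return handler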
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
route_class: Type[APIRoute] = APIRoute,
default_response_class: Type[Response] = None,
on_startup: Sequence[Callable] = None,
on_shutdown: Sequence[Callable] = None,
) -> None:
super().__init__(
routes=routes,
redirect_slashes=redirect_slashes,
default=default,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
self.default_response_class = default_response_class
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
route_class_override: Optional[Type[APIRoute]] = None,
callbacks: List[APIRoute] = None,
) -> None:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
route_class = route_class_override or self.route_class
route = route_class(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
callbacks=callbacks,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(
path,
endpoint=endpoint,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
                    raise Exception(
                        f"Prefix and path cannot both be empty (path operation: {name})"
                    )
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_exclude_unset=route.response_model_exclude_unset,
include_in_schema=route.include_in_schema,
response_class=route.response_class or default_response_class,
name=route.name,
route_class_override=type(route),
callbacks=route.callbacks,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
for handler in router.on_startup:
self.add_event_handler("startup", handler)
for handler in router.on_shutdown:
self.add_event_handler("shutdown", handler)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = None,
response_model_exclude_unset: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
callbacks: List[APIRoute] = None,
) -> Callable:
if response_model_skip_defaults is not None:
warning_response_model_skip_defaults_deprecated() # pragma: nocover
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=bool(
response_model_exclude_unset or response_model_skip_defaults
),
include_in_schema=include_in_schema,
response_class=response_class or self.default_response_class,
name=name,
callbacks=callbacks,
)
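# A minimal usage sketch for the per-method decorators above (hypothetical
# router and endpoint):
#
#     router = APIRouter()
#
#     @router.get("/items/{item_id}", tags=["items"])
#     async def read_item(item_id: int):
#         return {"item_id": item_id}
#
# Each decorator delegates to api_route() with the matching single-item
# methods list; the deprecated response_model_skip_defaults argument still
# works but triggers a deprecation warning.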
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-2-fixed/fastapi/fastapi/routing.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-2-buggy/fastapi/fastapi/routing.py |
fastapi-bug-4 | import http.client
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, cast
from fastapi import routing
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_flat_dependant
from fastapi.encoders import jsonable_encoder
from fastapi.openapi.constants import (
METHODS_WITH_BODY,
REF_PREFIX,
STATUS_CODES_WITH_NO_BODY,
)
from fastapi.openapi.models import OpenAPI
from fastapi.params import Body, Param
from fastapi.utils import (
generate_operation_id_for_path,
get_field_info,
get_flat_models_from_routes,
get_model_definitions,
)
from pydantic import BaseModel
from pydantic.schema import field_schema, get_model_name_map
from pydantic.utils import lenient_issubclass
from starlette.responses import JSONResponse
from starlette.routing import BaseRoute
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
try:
from pydantic.fields import ModelField
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
validation_error_definition = {
"title": "ValidationError",
"type": "object",
"properties": {
"loc": {"title": "Location", "type": "array", "items": {"type": "string"}},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
"required": ["loc", "msg", "type"],
}
validation_error_response_definition = {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": REF_PREFIX + "ValidationError"},
}
},
}
status_code_ranges: Dict[str, str] = {
"1XX": "Information",
"2XX": "Success",
"3XX": "Redirection",
"4XX": "Client Error",
"5XX": "Server Error",
"DEFAULT": "Default Response",
}
def get_openapi_params(dependant: Dependant) -> List[ModelField]:
flat_dependant = get_flat_dependant(dependant, skip_repeats=True)
return (
flat_dependant.path_params
+ flat_dependant.query_params
+ flat_dependant.header_params
+ flat_dependant.cookie_params
)
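# Only parameter-style fields (path, query, header, cookie) are collected
# here; body fields are documented separately via the requestBody object
# (see get_openapi_operation_request_body below).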
def get_openapi_security_definitions(flat_dependant: Dependant) -> Tuple[Dict, List]:
security_definitions = {}
operation_security = []
for security_requirement in flat_dependant.security_requirements:
security_definition = jsonable_encoder(
security_requirement.security_scheme.model,
by_alias=True,
include_none=False,
)
security_name = security_requirement.security_scheme.scheme_name
security_definitions[security_name] = security_definition
operation_security.append({security_name: security_requirement.scopes})
return security_definitions, operation_security
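# Each security scheme is emitted once (merged into components.securitySchemes
# by the caller), while the operation-level entry pairs the scheme name with
# the OAuth2 scopes that this particular operation requires.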
def get_openapi_operation_parameters(
all_route_params: Sequence[ModelField],
) -> List[Dict[str, Any]]:
parameters = []
for param in all_route_params:
field_info = get_field_info(param)
field_info = cast(Param, field_info)
parameter = {
"name": param.alias,
"in": field_info.in_.value,
"required": param.required,
"schema": field_schema(param, model_name_map={})[0],
}
if field_info.description:
parameter["description"] = field_info.description
if field_info.deprecated:
parameter["deprecated"] = field_info.deprecated
parameters.append(parameter)
return parameters
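# Shape of one emitted parameter object (values illustrative):
#
#     {
#         "name": "item_id",
#         "in": "path",
#         "required": True,
#         "schema": {"title": "Item Id", "type": "integer"},
#     }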
def get_openapi_operation_request_body(
*, body_field: Optional[ModelField], model_name_map: Dict[Type[BaseModel], str]
) -> Optional[Dict]:
if not body_field:
return None
assert isinstance(body_field, ModelField)
body_schema, _, _ = field_schema(
body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
field_info = cast(Body, get_field_info(body_field))
request_media_type = field_info.media_type
required = body_field.required
request_body_oai: Dict[str, Any] = {}
if required:
request_body_oai["required"] = required
request_body_oai["content"] = {request_media_type: {"schema": body_schema}}
return request_body_oai
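# The request body's media type comes from the Body field's FieldInfo:
# application/json for Body(...), application/x-www-form-urlencoded for
# Form(...), multipart/form-data for File(...); a single schema is emitted
# under that one content type.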
def generate_operation_id(*, route: routing.APIRoute, method: str) -> str:
if route.operation_id:
return route.operation_id
path: str = route.path_format
return generate_operation_id_for_path(name=route.name, path=path, method=method)
def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str:
if route.summary:
return route.summary
return route.name.replace("_", " ").title()
def get_openapi_operation_metadata(*, route: routing.APIRoute, method: str) -> Dict:
operation: Dict[str, Any] = {}
if route.tags:
operation["tags"] = route.tags
operation["summary"] = generate_operation_summary(route=route, method=method)
if route.description:
operation["description"] = route.description
operation["operationId"] = generate_operation_id(route=route, method=method)
if route.deprecated:
operation["deprecated"] = route.deprecated
return operation
def get_openapi_path(
*, route: routing.APIRoute, model_name_map: Dict[Type, str]
) -> Tuple[Dict, Dict, Dict]:
path = {}
security_schemes: Dict[str, Any] = {}
definitions: Dict[str, Any] = {}
assert route.methods is not None, "Methods must be a list"
assert route.response_class, "A response class is needed to generate OpenAPI"
route_response_media_type: Optional[str] = route.response_class.media_type
if route.include_in_schema:
for method in route.methods:
operation = get_openapi_operation_metadata(route=route, method=method)
parameters: List[Dict] = []
flat_dependant = get_flat_dependant(route.dependant, skip_repeats=True)
security_definitions, operation_security = get_openapi_security_definitions(
flat_dependant=flat_dependant
)
if operation_security:
operation.setdefault("security", []).extend(operation_security)
if security_definitions:
security_schemes.update(security_definitions)
all_route_params = get_openapi_params(route.dependant)
operation_parameters = get_openapi_operation_parameters(all_route_params)
parameters.extend(operation_parameters)
if parameters:
operation["parameters"] = parameters
if method in METHODS_WITH_BODY:
request_body_oai = get_openapi_operation_request_body(
body_field=route.body_field, model_name_map=model_name_map
)
if request_body_oai:
operation["requestBody"] = request_body_oai
if route.callbacks:
callbacks = {}
for callback in route.callbacks:
                    cb_path, cb_security_schemes, cb_definitions = get_openapi_path(
route=callback, model_name_map=model_name_map
)
callbacks[callback.name] = {callback.path: cb_path}
operation["callbacks"] = callbacks
if route.responses:
for (additional_status_code, response) in route.responses.items():
assert isinstance(
response, dict
), "An additional response must be a dict"
field = route.response_fields.get(additional_status_code)
if field:
response_schema, _, _ = field_schema(
field, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
response.setdefault("content", {}).setdefault(
route_response_media_type or "application/json", {}
)["schema"] = response_schema
status_text: Optional[str] = status_code_ranges.get(
str(additional_status_code).upper()
) or http.client.responses.get(int(additional_status_code))
response.setdefault(
"description", status_text or "Additional Response"
)
status_code_key = str(additional_status_code).upper()
if status_code_key == "DEFAULT":
status_code_key = "default"
operation.setdefault("responses", {})[status_code_key] = response
status_code = str(route.status_code)
operation.setdefault("responses", {}).setdefault(status_code, {})[
"description"
] = route.response_description
if (
route_response_media_type
and route.status_code not in STATUS_CODES_WITH_NO_BODY
):
response_schema = {"type": "string"}
if lenient_issubclass(route.response_class, JSONResponse):
if route.response_field:
response_schema, _, _ = field_schema(
route.response_field,
model_name_map=model_name_map,
ref_prefix=REF_PREFIX,
)
else:
response_schema = {}
operation.setdefault("responses", {}).setdefault(
status_code, {}
).setdefault("content", {}).setdefault(route_response_media_type, {})[
"schema"
] = response_schema
http422 = str(HTTP_422_UNPROCESSABLE_ENTITY)
if (all_route_params or route.body_field) and not any(
[
status in operation["responses"]
for status in [http422, "4XX", "default"]
]
):
operation["responses"][http422] = {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {"$ref": REF_PREFIX + "HTTPValidationError"}
}
},
}
if "ValidationError" not in definitions:
definitions.update(
{
"ValidationError": validation_error_definition,
"HTTPValidationError": validation_error_response_definition,
}
)
path[method.lower()] = operation
return path, security_schemes, definitions
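# get_openapi_path returns three dicts (the per-method path item, any
# referenced security schemes, and extra definitions such as the validation
# error models) which the caller merges into the top-level document. The 422
# response is added automatically whenever an operation has parameters or a
# body and declares no explicit 422, 4XX, or default response.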
def get_openapi(
*,
title: str,
version: str,
openapi_version: str = "3.0.2",
description: str = None,
routes: Sequence[BaseRoute],
openapi_prefix: str = ""
) -> Dict:
info = {"title": title, "version": version}
if description:
info["description"] = description
output = {"openapi": openapi_version, "info": info}
components: Dict[str, Dict] = {}
paths: Dict[str, Dict] = {}
flat_models = get_flat_models_from_routes(routes)
model_name_map = get_model_name_map(flat_models)
definitions = get_model_definitions(
flat_models=flat_models, model_name_map=model_name_map
)
for route in routes:
if isinstance(route, routing.APIRoute):
result = get_openapi_path(route=route, model_name_map=model_name_map)
if result:
path, security_schemes, path_definitions = result
if path:
paths.setdefault(openapi_prefix + route.path_format, {}).update(
path
)
if security_schemes:
components.setdefault("securitySchemes", {}).update(
security_schemes
)
if path_definitions:
definitions.update(path_definitions)
if definitions:
components["schemas"] = {k: definitions[k] for k in sorted(definitions)}
if components:
output["components"] = components
output["paths"] = paths
return jsonable_encoder(OpenAPI(**output), by_alias=True, include_none=False)
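# A minimal standalone usage sketch of get_openapi (hypothetical app):
#
#     from fastapi import FastAPI
#
#     app = FastAPI()
#     schema = get_openapi(title="Demo", version="0.1.0", routes=app.routes)
#     assert schema["info"] == {"title": "Demo", "version": "0.1.0"}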
import http.client
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, cast
from fastapi import routing
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_flat_dependant
from fastapi.encoders import jsonable_encoder
from fastapi.openapi.constants import (
METHODS_WITH_BODY,
REF_PREFIX,
STATUS_CODES_WITH_NO_BODY,
)
from fastapi.openapi.models import OpenAPI
from fastapi.params import Body, Param
from fastapi.utils import (
generate_operation_id_for_path,
get_field_info,
get_flat_models_from_routes,
get_model_definitions,
)
from pydantic import BaseModel
from pydantic.schema import field_schema, get_model_name_map
from pydantic.utils import lenient_issubclass
from starlette.responses import JSONResponse
from starlette.routing import BaseRoute
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
try:
from pydantic.fields import ModelField
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
validation_error_definition = {
"title": "ValidationError",
"type": "object",
"properties": {
"loc": {"title": "Location", "type": "array", "items": {"type": "string"}},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
"required": ["loc", "msg", "type"],
}
validation_error_response_definition = {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": REF_PREFIX + "ValidationError"},
}
},
}
status_code_ranges: Dict[str, str] = {
"1XX": "Information",
"2XX": "Success",
"3XX": "Redirection",
"4XX": "Client Error",
"5XX": "Server Error",
"DEFAULT": "Default Response",
}
def get_openapi_params(dependant: Dependant) -> List[ModelField]:
flat_dependant = get_flat_dependant(dependant, skip_repeats=True)
return (
flat_dependant.path_params
+ flat_dependant.query_params
+ flat_dependant.header_params
+ flat_dependant.cookie_params
)
def get_openapi_security_definitions(flat_dependant: Dependant) -> Tuple[Dict, List]:
security_definitions = {}
operation_security = []
for security_requirement in flat_dependant.security_requirements:
security_definition = jsonable_encoder(
security_requirement.security_scheme.model,
by_alias=True,
include_none=False,
)
security_name = security_requirement.security_scheme.scheme_name
security_definitions[security_name] = security_definition
operation_security.append({security_name: security_requirement.scopes})
return security_definitions, operation_security
def get_openapi_operation_parameters(
all_route_params: Sequence[ModelField],
) -> List[Dict[str, Any]]:
parameters = []
for param in all_route_params:
field_info = get_field_info(param)
field_info = cast(Param, field_info)
parameter = {
"name": param.alias,
"in": field_info.in_.value,
"required": param.required,
"schema": field_schema(param, model_name_map={})[0],
}
if field_info.description:
parameter["description"] = field_info.description
if field_info.deprecated:
parameter["deprecated"] = field_info.deprecated
parameters.append(parameter)
return parameters
def get_openapi_operation_request_body(
*, body_field: Optional[ModelField], model_name_map: Dict[Type[BaseModel], str]
) -> Optional[Dict]:
if not body_field:
return None
assert isinstance(body_field, ModelField)
body_schema, _, _ = field_schema(
body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
field_info = cast(Body, get_field_info(body_field))
request_media_type = field_info.media_type
required = body_field.required
request_body_oai: Dict[str, Any] = {}
if required:
request_body_oai["required"] = required
request_body_oai["content"] = {request_media_type: {"schema": body_schema}}
return request_body_oai
def generate_operation_id(*, route: routing.APIRoute, method: str) -> str:
if route.operation_id:
return route.operation_id
path: str = route.path_format
return generate_operation_id_for_path(name=route.name, path=path, method=method)
def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str:
if route.summary:
return route.summary
return route.name.replace("_", " ").title()
def get_openapi_operation_metadata(*, route: routing.APIRoute, method: str) -> Dict:
operation: Dict[str, Any] = {}
if route.tags:
operation["tags"] = route.tags
operation["summary"] = generate_operation_summary(route=route, method=method)
if route.description:
operation["description"] = route.description
operation["operationId"] = generate_operation_id(route=route, method=method)
if route.deprecated:
operation["deprecated"] = route.deprecated
return operation
def get_openapi_path(
*, route: routing.APIRoute, model_name_map: Dict[Type, str]
) -> Tuple[Dict, Dict, Dict]:
path = {}
security_schemes: Dict[str, Any] = {}
definitions: Dict[str, Any] = {}
assert route.methods is not None, "Methods must be a list"
assert route.response_class, "A response class is needed to generate OpenAPI"
route_response_media_type: Optional[str] = route.response_class.media_type
if route.include_in_schema:
for method in route.methods:
operation = get_openapi_operation_metadata(route=route, method=method)
parameters: List[Dict] = []
flat_dependant = get_flat_dependant(route.dependant, skip_repeats=True)
security_definitions, operation_security = get_openapi_security_definitions(
flat_dependant=flat_dependant
)
if operation_security:
operation.setdefault("security", []).extend(operation_security)
if security_definitions:
security_schemes.update(security_definitions)
all_route_params = get_openapi_params(route.dependant)
operation_parameters = get_openapi_operation_parameters(all_route_params)
parameters.extend(operation_parameters)
if parameters:
operation["parameters"] = list(
{param["name"]: param for param in parameters}.values()
)
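                # The dict comprehension above de-duplicates parameters by
                # name; if the same parameter is declared more than once
                # (e.g. in a dependency and in the endpoint), the last
                # occurrence wins.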
if method in METHODS_WITH_BODY:
request_body_oai = get_openapi_operation_request_body(
body_field=route.body_field, model_name_map=model_name_map
)
if request_body_oai:
operation["requestBody"] = request_body_oai
if route.callbacks:
callbacks = {}
for callback in route.callbacks:
                    cb_path, cb_security_schemes, cb_definitions = get_openapi_path(
route=callback, model_name_map=model_name_map
)
callbacks[callback.name] = {callback.path: cb_path}
operation["callbacks"] = callbacks
if route.responses:
for (additional_status_code, response) in route.responses.items():
assert isinstance(
response, dict
), "An additional response must be a dict"
field = route.response_fields.get(additional_status_code)
if field:
response_schema, _, _ = field_schema(
field, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
response.setdefault("content", {}).setdefault(
route_response_media_type or "application/json", {}
)["schema"] = response_schema
status_text: Optional[str] = status_code_ranges.get(
str(additional_status_code).upper()
) or http.client.responses.get(int(additional_status_code))
response.setdefault(
"description", status_text or "Additional Response"
)
status_code_key = str(additional_status_code).upper()
if status_code_key == "DEFAULT":
status_code_key = "default"
operation.setdefault("responses", {})[status_code_key] = response
status_code = str(route.status_code)
operation.setdefault("responses", {}).setdefault(status_code, {})[
"description"
] = route.response_description
if (
route_response_media_type
and route.status_code not in STATUS_CODES_WITH_NO_BODY
):
response_schema = {"type": "string"}
if lenient_issubclass(route.response_class, JSONResponse):
if route.response_field:
response_schema, _, _ = field_schema(
route.response_field,
model_name_map=model_name_map,
ref_prefix=REF_PREFIX,
)
else:
response_schema = {}
operation.setdefault("responses", {}).setdefault(
status_code, {}
).setdefault("content", {}).setdefault(route_response_media_type, {})[
"schema"
] = response_schema
http422 = str(HTTP_422_UNPROCESSABLE_ENTITY)
            if (all_route_params or route.body_field) and not any(
                status in operation["responses"]
                for status in (http422, "4XX", "default")
            ):
):
operation["responses"][http422] = {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {"$ref": REF_PREFIX + "HTTPValidationError"}
}
},
}
if "ValidationError" not in definitions:
definitions.update(
{
"ValidationError": validation_error_definition,
"HTTPValidationError": validation_error_response_definition,
}
)
path[method.lower()] = operation
return path, security_schemes, definitions
def get_openapi(
*,
title: str,
version: str,
openapi_version: str = "3.0.2",
description: str = None,
routes: Sequence[BaseRoute],
openapi_prefix: str = ""
) -> Dict:
info = {"title": title, "version": version}
if description:
info["description"] = description
output = {"openapi": openapi_version, "info": info}
components: Dict[str, Dict] = {}
paths: Dict[str, Dict] = {}
flat_models = get_flat_models_from_routes(routes)
model_name_map = get_model_name_map(flat_models)
definitions = get_model_definitions(
flat_models=flat_models, model_name_map=model_name_map
)
for route in routes:
if isinstance(route, routing.APIRoute):
result = get_openapi_path(route=route, model_name_map=model_name_map)
if result:
path, security_schemes, path_definitions = result
if path:
paths.setdefault(openapi_prefix + route.path_format, {}).update(
path
)
if security_schemes:
components.setdefault("securitySchemes", {}).update(
security_schemes
)
if path_definitions:
definitions.update(path_definitions)
if definitions:
components["schemas"] = {k: definitions[k] for k in sorted(definitions)}
if components:
output["components"] = components
output["paths"] = paths
return jsonable_encoder(OpenAPI(**output), by_alias=True, include_none=False)
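# Usage sketch (hedged): the title/version values and the `app` object are
# illustrative assumptions, not part of the original source. Given a route
# list, the generator above is typically driven like this:
#
#     openapi_schema = get_openapi(
#         title="Example API",
#         version="0.1.0",
#         routes=app.routes,
#     )
#     openapi_schema["paths"]  # maps each path to its method operations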
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-4-fixed/fastapi/fastapi/openapi/utils.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-4-buggy/fastapi/fastapi/openapi/utils.py |
fastapi-bug-8 | import asyncio
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.utils import create_cloned_field, generate_operation_id_for_path
from pydantic import BaseConfig, BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
def serialize_response(
*,
field: Field = None,
response: Response,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude: Union[SetIntStr, DictIntStrAny] = set(),
by_alias: bool = True,
skip_defaults: bool = False,
) -> Any:
if field:
errors = []
if skip_defaults and isinstance(response, BaseModel):
response = response.dict(skip_defaults=skip_defaults)
value, errors_ = field.validate(response, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
)
else:
return jsonable_encoder(response)
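# Behavior sketch (hypothetical field name): with a `field` present, the raw
# endpoint return value is validated and re-encoded, so invalid data surfaces
# as a pydantic ValidationError located at ("response", ...):
#
#     data = serialize_response(field=item_field, response={"name": "Foo"})
#
# Without a field, the value is passed straight through jsonable_encoder.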
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: Field = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logging.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors)
else:
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = serialize_response(
field=response_field,
response=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
skip_defaults=response_model_skip_defaults,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
        values, errors, _, _, _ = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Optional[Type[Response]] = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
        self.methods = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
response_name = "Response_" + self.unique_id
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
# By being a new field, no inheritance will be passed as is. A new model
# will be always created.
self.secure_cloned_response_field: Optional[Field] = create_cloned_field(
self.response_field
)
else:
self.response_field = None
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert lenient_issubclass(
model, BaseModel
), "A response model must be a Pydantic model"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = Field(
name=response_name,
type_=model,
class_validators=None,
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], Field] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_skip_defaults = response_model_skip_defaults
self.include_in_schema = include_in_schema
self.response_class = response_class
        assert inspect.isfunction(endpoint) or inspect.ismethod(
            endpoint
        ), "An endpoint must be a function or method"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class or JSONResponse,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_skip_defaults=self.response_model_skip_defaults,
dependency_overrides_provider=self.dependency_overrides_provider,
)
)
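# Sketch of the cloning rationale documented above (User/UserInDB are
# hypothetical models, not part of this module): without the cloned field,
# returning a UserInDB from an endpoint declared with response_model=User
# would pass validation (it is a subclass) and leak hashed_password.
#
#     class User(BaseModel):
#         username: str
#
#     class UserInDB(User):
#         hashed_password: str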
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
route_class: Type[APIRoute] = APIRoute,
) -> None:
super().__init__(
routes=routes, redirect_slashes=redirect_slashes, default=default
)
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> None:
route = self.route_class(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
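    # Usage sketch for the decorator above (router name and path are
    # illustrative assumptions):
    #
    #     router = APIRouter()
    #
    #     @router.websocket("/ws")
    #     async def websocket_endpoint(websocket: WebSocket):
    #         await websocket.accept()
    #         await websocket.send_text("hello")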
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_skip_defaults=route.response_model_skip_defaults,
include_in_schema=route.include_in_schema,
response_class=route.response_class or default_response_class,
name=route.name,
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
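# Usage sketch tying the router methods together (the names and the `Item`
# model are illustrative assumptions, not part of the original source):
#
#     router = APIRouter()
#
#     @router.get("/items/{item_id}", response_model=Item)
#     async def read_item(item_id: int):
#         return {"item_id": item_id}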
import asyncio
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union
from fastapi import params
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.utils import create_cloned_field, generate_operation_id_for_path
from pydantic import BaseConfig, BaseModel, Schema
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import Field
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
def serialize_response(
*,
field: Field = None,
response: Response,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude: Union[SetIntStr, DictIntStrAny] = set(),
by_alias: bool = True,
skip_defaults: bool = False,
) -> Any:
if field:
errors = []
if skip_defaults and isinstance(response, BaseModel):
response = response.dict(skip_defaults=skip_defaults)
value, errors_ = field.validate(response, {}, loc=("response",))
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
)
else:
return jsonable_encoder(response)
def get_app(
dependant: Dependant,
body_field: Field = None,
status_code: int = 200,
response_class: Type[Response] = JSONResponse,
response_field: Field = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
dependency_overrides_provider: Any = None,
) -> Callable:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.schema, params.Form)
async def app(request: Request) -> Response:
try:
body = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
body = await request.json()
except Exception as e:
logging.error(f"Error getting request body: {e}")
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors)
else:
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
raw_response = await dependant.call(**values)
else:
raw_response = await run_in_threadpool(dependant.call, **values)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = serialize_response(
field=response_field,
response=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
skip_defaults=response_model_skip_defaults,
)
response = response_class(
content=response_data,
status_code=status_code,
background=background_tasks,
)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Any = None
) -> Callable:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
        values, errors, _, _, _ = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable,
*,
name: str = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
name: str = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Optional[Type[Response]] = None,
dependency_overrides_provider: Any = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
        self.methods = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
response_name = "Response_" + self.unique_id
self.response_field: Optional[Field] = Field(
name=response_name,
type_=self.response_model,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
# By being a new field, no inheritance will be passed as is. A new model
# will be always created.
self.secure_cloned_response_field: Optional[Field] = create_cloned_field(
self.response_field
)
else:
self.response_field = None
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert lenient_issubclass(
model, BaseModel
), "A response model must be a Pydantic model"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = Field(
name=response_name,
type_=model,
class_validators=None,
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], Field] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_skip_defaults = response_model_skip_defaults
self.include_in_schema = include_in_schema
self.response_class = response_class
        assert inspect.isfunction(endpoint) or inspect.ismethod(
            endpoint
        ), "An endpoint must be a function or method"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.app = request_response(
get_app(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class or JSONResponse,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_skip_defaults=self.response_model_skip_defaults,
dependency_overrides_provider=self.dependency_overrides_provider,
)
)
class APIRouter(routing.Router):
def __init__(
self,
routes: List[routing.BaseRoute] = None,
redirect_slashes: bool = True,
default: ASGIApp = None,
dependency_overrides_provider: Any = None,
route_class: Type[APIRoute] = APIRoute,
) -> None:
super().__init__(
routes=routes, redirect_slashes=redirect_slashes, default=default
)
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
def add_api_route(
self,
path: str,
endpoint: Callable,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
route_class_override: Optional[Type[APIRoute]] = None,
) -> None:
route_class = route_class_override or self.route_class
route = route_class(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
methods: List[str] = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable, name: str = None
) -> None:
route = APIWebSocketRoute(path, endpoint=endpoint, name=name)
self.routes.append(route)
def websocket(self, path: str, name: str = None) -> Callable:
def decorator(func: Callable) -> Callable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
responses: Dict[Union[int, str], Dict[str, Any]] = None,
default_response_class: Optional[Type[Response]] = None,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=(route.tags or []) + (tags or []),
dependencies=list(dependencies or [])
+ list(route.dependencies or []),
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_skip_defaults=route.response_model_skip_defaults,
include_in_schema=route.include_in_schema,
response_class=route.response_class or default_response_class,
name=route.name,
route_class_override=type(route),
)
elif isinstance(route, routing.Route):
self.add_route(
prefix + route.path,
route.endpoint,
methods=list(route.methods or []),
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
def get(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def put(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def post(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def delete(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def options(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def head(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def patch(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
def trace(
self,
path: str,
*,
response_model: Type[Any] = None,
status_code: int = 200,
tags: List[str] = None,
dependencies: Sequence[params.Depends] = None,
summary: str = None,
description: str = None,
response_description: str = "Successful Response",
responses: Dict[Union[int, str], Dict[str, Any]] = None,
deprecated: bool = None,
operation_id: str = None,
response_model_include: Union[SetIntStr, DictIntStrAny] = None,
response_model_exclude: Union[SetIntStr, DictIntStrAny] = set(),
response_model_by_alias: bool = True,
response_model_skip_defaults: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = None,
name: str = None,
) -> Callable:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags or [],
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses or {},
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_skip_defaults=response_model_skip_defaults,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
)
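# Sketch of what the `route_class_override` parameter (added to add_api_route
# in this version) preserves; TimedRoute is a hypothetical subclass:
#
#     class TimedRoute(APIRoute):
#         ...  # custom per-route behavior
#
#     sub_router = APIRouter(route_class=TimedRoute)
#     parent = APIRouter()
#     parent.include_router(sub_router)  # routes are re-created as TimedRoute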
| BugsInPy/BugsInPy/temp/projects/fastapi/bug-8-fixed/fastapi/fastapi/routing.py,BugsInPy/BugsInPy/temp/projects/fastapi/bug-8-buggy/fastapi/fastapi/routing.py |
scrapy-bug-5 | """
This module implements the Response class which is used to represent HTTP
responses in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from six.moves.urllib.parse import urljoin
from scrapy.http.request import Request
from scrapy.http.headers import Headers
from scrapy.link import Link
from scrapy.utils.trackref import object_ref
from scrapy.http.common import obsolete_setter
from scrapy.exceptions import NotSupported
class Response(object_ref):
def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):
self.headers = Headers(headers or {})
self.status = int(status)
self._set_body(body)
self._set_url(url)
self.request = request
self.flags = [] if flags is None else list(flags)
@property
def meta(self):
try:
return self.request.meta
except AttributeError:
raise AttributeError(
"Response.meta not available, this response "
"is not tied to any request"
)
def _get_url(self):
return self._url
def _set_url(self, url):
if isinstance(url, str):
self._url = url
else:
            raise TypeError('%s url must be str, got %s' % (type(self).__name__,
                                                            type(url).__name__))
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
elif not isinstance(body, bytes):
raise TypeError(
"Response body must be bytes. "
"If you want to pass unicode body use TextResponse "
"or HtmlResponse.")
else:
self._body = body
body = property(_get_body, obsolete_setter(_set_body, 'body'))
def __str__(self):
return "<%d %s>" % (self.status, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Response"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Response with the same attributes except for those
given new values.
"""
for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
@property
def text(self):
"""For subclasses of TextResponse, this will return the body
as text (unicode object in Python 2 and str in Python 3)
"""
raise AttributeError("Response content isn't text")
def css(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def xpath(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def follow(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None):
# type: (...) -> Request
"""
Return a :class:`~.Request` instance to follow a link ``url``.
        It accepts the same arguments as the ``Request.__init__`` method,
but ``url`` can be a relative URL or a ``scrapy.link.Link`` object,
not only an absolute URL.
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
method which supports selectors in addition to absolute/relative URLs
and Link objects.
"""
if isinstance(url, Link):
url = url.url
url = self.urljoin(url)
return Request(url, callback,
method=method,
headers=headers,
body=body,
cookies=cookies,
meta=meta,
encoding=encoding,
priority=priority,
dont_filter=dont_filter,
errback=errback)
"""
This module implements the Response class which is used to represent HTTP
responses in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from six.moves.urllib.parse import urljoin
from scrapy.http.request import Request
from scrapy.http.headers import Headers
from scrapy.link import Link
from scrapy.utils.trackref import object_ref
from scrapy.http.common import obsolete_setter
from scrapy.exceptions import NotSupported
class Response(object_ref):
def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):
self.headers = Headers(headers or {})
self.status = int(status)
self._set_body(body)
self._set_url(url)
self.request = request
self.flags = [] if flags is None else list(flags)
@property
def meta(self):
try:
return self.request.meta
except AttributeError:
raise AttributeError(
"Response.meta not available, this response "
"is not tied to any request"
)
def _get_url(self):
return self._url
def _set_url(self, url):
if isinstance(url, str):
self._url = url
else:
            raise TypeError('%s url must be str, got %s' % (type(self).__name__,
type(url).__name__))
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
elif not isinstance(body, bytes):
raise TypeError(
"Response body must be bytes. "
"If you want to pass unicode body use TextResponse "
"or HtmlResponse.")
else:
self._body = body
body = property(_get_body, obsolete_setter(_set_body, 'body'))
def __str__(self):
return "<%d %s>" % (self.status, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Response"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Response with the same attributes except for those
given new values.
"""
for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
@property
def text(self):
"""For subclasses of TextResponse, this will return the body
as text (unicode object in Python 2 and str in Python 3)
"""
raise AttributeError("Response content isn't text")
def css(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def xpath(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def follow(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None):
# type: (...) -> Request
"""
Return a :class:`~.Request` instance to follow a link ``url``.
        It accepts the same arguments as the ``Request.__init__`` method,
but ``url`` can be a relative URL or a ``scrapy.link.Link`` object,
not only an absolute URL.
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
method which supports selectors in addition to absolute/relative URLs
and Link objects.
"""
if isinstance(url, Link):
url = url.url
elif url is None:
raise ValueError("url can't be None")
url = self.urljoin(url)
return Request(url, callback,
method=method,
headers=headers,
body=body,
cookies=cookies,
meta=meta,
encoding=encoding,
priority=priority,
dont_filter=dont_filter,
errback=errback)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-5-fixed/scrapy/scrapy/http/response/__init__.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-5-buggy/scrapy/scrapy/http/response/__init__.py |
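A short sketch (illustrative, run outside a real crawl) of the behavioural difference between the two copies above: the copy with the explicit None check fails fast, while the other one let urljoin() fall back to the response's own URL, silently re-requesting it.

from scrapy.http import Response

resp = Response(url="http://example.com/a")
req = resp.follow("b.html")              # relative URLs are joined via urljoin()
assert req.url == "http://example.com/b.html"
try:
    resp.follow(None)
except ValueError as exc:                # raised only by the fixed copy
    print(exc)                           # url can't be None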
scrapy-bug-12 | """
XPath selectors based on lxml
"""
import warnings
from parsel import Selector as _ParselSelector
from scrapy.utils.trackref import object_ref
from scrapy.utils.python import to_bytes
from scrapy.http import HtmlResponse, XmlResponse
from scrapy.utils.decorators import deprecated
from scrapy.exceptions import ScrapyDeprecationWarning
__all__ = ['Selector', 'SelectorList']
def _st(response, st):
if st is None:
return 'xml' if isinstance(response, XmlResponse) else 'html'
return st
def _response_from_text(text, st):
rt = XmlResponse if st == 'xml' else HtmlResponse
return rt(url='about:blank', encoding='utf-8',
body=to_bytes(text, 'utf-8'))
class SelectorList(_ParselSelector.selectorlist_cls, object_ref):
@deprecated(use_instead='.extract()')
def extract_unquoted(self):
return [x.extract_unquoted() for x in self]
@deprecated(use_instead='.xpath()')
def x(self, xpath):
return self.select(xpath)
@deprecated(use_instead='.xpath()')
def select(self, xpath):
return self.xpath(xpath)
class Selector(_ParselSelector, object_ref):
__slots__ = ['response']
selectorlist_cls = SelectorList
def __init__(self, response=None, text=None, type=None, root=None, _root=None, **kwargs):
st = _st(response, type or self._default_type)
if _root is not None:
warnings.warn("Argument `_root` is deprecated, use `root` instead",
ScrapyDeprecationWarning, stacklevel=2)
if root is None:
root = _root
else:
warnings.warn("Ignoring deprecated `_root` argument, using provided `root`")
if text is not None:
response = _response_from_text(text, st)
if response is not None:
text = response.text
kwargs.setdefault('base_url', response.url)
self.response = response
super(Selector, self).__init__(text=text, type=st, root=root, **kwargs)
# Deprecated api
@property
def _root(self):
warnings.warn("Attribute `_root` is deprecated, use `root` instead",
ScrapyDeprecationWarning, stacklevel=2)
return self.root
@deprecated(use_instead='.xpath()')
def select(self, xpath):
return self.xpath(xpath)
@deprecated(use_instead='.extract()')
def extract_unquoted(self):
return self.extract()
"""
XPath selectors based on lxml
"""
import warnings
from parsel import Selector as _ParselSelector
from scrapy.utils.trackref import object_ref
from scrapy.utils.python import to_bytes
from scrapy.http import HtmlResponse, XmlResponse
from scrapy.utils.decorators import deprecated
from scrapy.exceptions import ScrapyDeprecationWarning
__all__ = ['Selector', 'SelectorList']
def _st(response, st):
if st is None:
return 'xml' if isinstance(response, XmlResponse) else 'html'
return st
def _response_from_text(text, st):
rt = XmlResponse if st == 'xml' else HtmlResponse
return rt(url='about:blank', encoding='utf-8',
body=to_bytes(text, 'utf-8'))
class SelectorList(_ParselSelector.selectorlist_cls, object_ref):
@deprecated(use_instead='.extract()')
def extract_unquoted(self):
return [x.extract_unquoted() for x in self]
@deprecated(use_instead='.xpath()')
def x(self, xpath):
return self.select(xpath)
@deprecated(use_instead='.xpath()')
def select(self, xpath):
return self.xpath(xpath)
class Selector(_ParselSelector, object_ref):
__slots__ = ['response']
selectorlist_cls = SelectorList
def __init__(self, response=None, text=None, type=None, root=None, _root=None, **kwargs):
st = _st(response, type or self._default_type)
if _root is not None:
warnings.warn("Argument `_root` is deprecated, use `root` instead",
ScrapyDeprecationWarning, stacklevel=2)
if root is None:
root = _root
else:
warnings.warn("Ignoring deprecated `_root` argument, using provided `root`")
if text is not None:
response = _response_from_text(text, st)
if response is not None:
text = response.text
kwargs.setdefault('base_url', response.url)
self.response = response
super(Selector, self).__init__(text=text, type=st, root=root, **kwargs)
# Deprecated api
@property
def _root(self):
warnings.warn("Attribute `_root` is deprecated, use `root` instead",
ScrapyDeprecationWarning, stacklevel=2)
return self.root
@deprecated(use_instead='.xpath()')
def select(self, xpath):
return self.xpath(xpath)
@deprecated(use_instead='.extract()')
def extract_unquoted(self):
return self.extract()
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-12-fixed/scrapy/scrapy/selector/unified.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-12-buggy/scrapy/scrapy/selector/unified.py |
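A small illustrative sketch (assumes scrapy/parsel are installed) of the two construction paths handled by Selector.__init__ above: building from raw text, and building from a response object whose text and base_url are picked up automatically.

from scrapy.http import HtmlResponse
from scrapy.selector import Selector

sel = Selector(text="<html><body><p>hi</p></body></html>")
print(sel.xpath("//p/text()").extract())                 # ['hi']

resp = HtmlResponse(url="http://example.com", body=b"<p>bye</p>")
print(Selector(response=resp).css("p::text").extract())  # ['bye']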
scrapy-bug-32 | import six
import signal
import logging
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import LogCounterHandler, configure_logging, log_scrapy_info
from scrapy import signals
logger = logging.getLogger(__name__)
class Crawler(object):
def __init__(self, spidercls, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
handler = LogCounterHandler(self, level=settings.get('LOG_LEVEL'))
logging.root.addHandler(handler)
# lambda is assigned to Crawler attribute because this way it is not
# garbage collected after leaving __init__ scope
self.__remove_handler = lambda: logging.root.removeHandler(handler)
self.signals.connect(self.__remove_handler, signals.engine_stopped)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.spidercls.update_settings(self.settings)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
"""
This is a convenient helper class that keeps track of, manages and runs
crawlers inside an already setup Twisted `reactor`_.
The CrawlerRunner object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
crawlers = property(
lambda self: self._crawlers,
doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
":meth:`crawl` and managed by this class."
)
def __init__(self, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self._crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
"""
Run a crawler with the provided arguments.
It will call the given Crawler's :meth:`~Crawler.crawl` method, while
keeping track of it so it can be stopped later.
If `crawler_or_spidercls` isn't a :class:`~scrapy.crawler.Crawler`
instance, this method will try to create one using this parameter as
the spider class given to it.
Returns a deferred that is fired when the crawling is finished.
:param crawler_or_spidercls: already created crawler, or a spider class
or spider's name inside the project to create it
:type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
:class:`~scrapy.spiders.Spider` subclass or string
:param list args: arguments to initialize the spider
:param dict kwargs: keyword arguments to initialize the spider
"""
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def stop(self):
"""
        Simultaneously stops all the crawling jobs taking place.
Returns a deferred that is fired when they all have ended.
"""
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
@defer.inlineCallbacks
def join(self):
"""
join()
Returns a deferred that is fired when all managed :attr:`crawlers` have
completed their executions.
"""
while self._active:
yield defer.DeferredList(self._active)
class CrawlerProcess(CrawlerRunner):
"""
A class to run multiple scrapy crawlers in a process simultaneously.
This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
for starting a Twisted `reactor`_ and handling shutdown signals, like the
keyboard interrupt command Ctrl-C. It also configures top-level logging.
This utility should be a better fit than
:class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
Twisted `reactor`_ within your application.
The CrawlerProcess object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
def __init__(self, settings):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
configure_logging(settings)
log_scrapy_info(settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
{'signame': signame})
reactor.callFromThread(self.stop)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
logger.info('Received %(signame)s twice, forcing unclean shutdown',
{'signame': signame})
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
"""
This method starts a Twisted `reactor`_, adjusts its pool size to
:setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
If `stop_after_crawl` is True, the reactor will be stopped after all
crawlers have finished, using :meth:`join`.
        :param boolean stop_after_crawl: whether to stop the reactor when all
crawlers have finished
"""
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(lambda _: self._stop_reactor())
cache_size = self.settings.getint('DNSCACHE_SIZE') if self.settings.getbool('DNSCACHE_ENABLED') else 0
reactor.installResolver(CachingThreadedResolver(reactor, cache_size,
self.settings.getfloat('DNS_TIMEOUT')))
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_MANAGER_CLASS',
settings.get('SPIDER_LOADER_CLASS'))
loader_cls = load_object(cls_path)
try:
verifyClass(ISpiderLoader, loader_cls)
except DoesNotImplement:
warnings.warn(
'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
'not fully implement scrapy.interfaces.ISpiderLoader interface. '
'Please add all missing methods to avoid unexpected runtime errors.',
category=ScrapyDeprecationWarning, stacklevel=2
)
return loader_cls.from_settings(settings.frozencopy())
import six
import signal
import logging
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import LogCounterHandler, configure_logging, log_scrapy_info
from scrapy import signals
logger = logging.getLogger(__name__)
class Crawler(object):
def __init__(self, spidercls, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
handler = LogCounterHandler(self, level=settings.get('LOG_LEVEL'))
logging.root.addHandler(handler)
# lambda is assigned to Crawler attribute because this way it is not
# garbage collected after leaving __init__ scope
self.__remove_handler = lambda: logging.root.removeHandler(handler)
self.signals.connect(self.__remove_handler, signals.engine_stopped)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.spidercls.update_settings(self.settings)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
"""
This is a convenient helper class that keeps track of, manages and runs
crawlers inside an already setup Twisted `reactor`_.
The CrawlerRunner object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
crawlers = property(
lambda self: self._crawlers,
doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
":meth:`crawl` and managed by this class."
)
def __init__(self, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self._crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
"""
Run a crawler with the provided arguments.
It will call the given Crawler's :meth:`~Crawler.crawl` method, while
keeping track of it so it can be stopped later.
If `crawler_or_spidercls` isn't a :class:`~scrapy.crawler.Crawler`
instance, this method will try to create one using this parameter as
the spider class given to it.
Returns a deferred that is fired when the crawling is finished.
:param crawler_or_spidercls: already created crawler, or a spider class
or spider's name inside the project to create it
:type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
:class:`~scrapy.spiders.Spider` subclass or string
:param list args: arguments to initialize the spider
:param dict kwargs: keyword arguments to initialize the spider
"""
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def stop(self):
"""
        Simultaneously stops all the crawling jobs taking place.
Returns a deferred that is fired when they all have ended.
"""
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
@defer.inlineCallbacks
def join(self):
"""
join()
Returns a deferred that is fired when all managed :attr:`crawlers` have
completed their executions.
"""
while self._active:
yield defer.DeferredList(self._active)
class CrawlerProcess(CrawlerRunner):
"""
A class to run multiple scrapy crawlers in a process simultaneously.
This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
for starting a Twisted `reactor`_ and handling shutdown signals, like the
keyboard interrupt command Ctrl-C. It also configures top-level logging.
This utility should be a better fit than
:class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
Twisted `reactor`_ within your application.
The CrawlerProcess object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
def __init__(self, settings):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
configure_logging(self.settings)
log_scrapy_info(self.settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
{'signame': signame})
reactor.callFromThread(self.stop)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
logger.info('Received %(signame)s twice, forcing unclean shutdown',
{'signame': signame})
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
"""
This method starts a Twisted `reactor`_, adjusts its pool size to
:setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
If `stop_after_crawl` is True, the reactor will be stopped after all
crawlers have finished, using :meth:`join`.
        :param boolean stop_after_crawl: whether to stop the reactor when all
crawlers have finished
"""
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(lambda _: self._stop_reactor())
cache_size = self.settings.getint('DNSCACHE_SIZE') if self.settings.getbool('DNSCACHE_ENABLED') else 0
reactor.installResolver(CachingThreadedResolver(reactor, cache_size,
self.settings.getfloat('DNS_TIMEOUT')))
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_MANAGER_CLASS',
settings.get('SPIDER_LOADER_CLASS'))
loader_cls = load_object(cls_path)
try:
verifyClass(ISpiderLoader, loader_cls)
except DoesNotImplement:
warnings.warn(
'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
'not fully implement scrapy.interfaces.ISpiderLoader interface. '
'Please add all missing methods to avoid unexpected runtime errors.',
category=ScrapyDeprecationWarning, stacklevel=2
)
return loader_cls.from_settings(settings.frozencopy())
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-32-fixed/scrapy/scrapy/crawler.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-32-buggy/scrapy/scrapy/crawler.py |
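A run-from-script sketch (the spider class is a placeholder assumption) of why the one-line change between the two copies above matters: a plain dict is converted to a Settings object only inside CrawlerRunner.__init__, so CrawlerProcess has to configure logging from self.settings rather than from the raw argument, which may still be a dict.

import scrapy
from scrapy.crawler import CrawlerProcess

class DemoSpider(scrapy.Spider):
    name = "demo"
    start_urls = ["http://example.com"]

    def parse(self, response):
        yield {"title": response.css("title::text").extract_first()}

# Passing a dict here could break the first copy inside configure_logging(),
# because the dict was handed over before being wrapped in Settings.
process = CrawlerProcess({"LOG_LEVEL": "INFO"})
process.crawl(DemoSpider)
process.start()  # blocking call; returns when the crawl finishes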
scrapy-bug-28 | from __future__ import print_function
import os
import logging
from scrapy.utils.job import job_dir
from scrapy.utils.request import request_fingerprint
class BaseDupeFilter(object):
@classmethod
def from_settings(cls, settings):
return cls()
def request_seen(self, request):
return False
def open(self): # can return deferred
pass
def close(self, reason): # can return a deferred
pass
def log(self, request, spider): # log that a request has been filtered
pass
class RFPDupeFilter(BaseDupeFilter):
"""Request Fingerprint duplicates filter"""
def __init__(self, path=None, debug=False):
self.file = None
self.fingerprints = set()
self.logdupes = True
self.debug = debug
self.logger = logging.getLogger(__name__)
if path:
self.file = open(os.path.join(path, 'requests.seen'), 'a+')
self.fingerprints.update(x.rstrip() for x in self.file)
@classmethod
def from_settings(cls, settings):
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(job_dir(settings), debug)
def request_seen(self, request):
fp = self.request_fingerprint(request)
if fp in self.fingerprints:
return True
self.fingerprints.add(fp)
if self.file:
self.file.write(fp + os.linesep)
def request_fingerprint(self, request):
return request_fingerprint(request)
def close(self, reason):
if self.file:
self.file.close()
def log(self, request, spider):
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request: %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
spider.crawler.stats.inc_value('dupefilter/filtered', spider=spider)
from __future__ import print_function
import os
import logging
from scrapy.utils.job import job_dir
from scrapy.utils.request import request_fingerprint
class BaseDupeFilter(object):
@classmethod
def from_settings(cls, settings):
return cls()
def request_seen(self, request):
return False
def open(self): # can return deferred
pass
def close(self, reason): # can return a deferred
pass
def log(self, request, spider): # log that a request has been filtered
pass
class RFPDupeFilter(BaseDupeFilter):
"""Request Fingerprint duplicates filter"""
def __init__(self, path=None, debug=False):
self.file = None
self.fingerprints = set()
self.logdupes = True
self.debug = debug
self.logger = logging.getLogger(__name__)
if path:
self.file = open(os.path.join(path, 'requests.seen'), 'a+')
self.fingerprints.update(x.rstrip() for x in self.file)
@classmethod
def from_settings(cls, settings):
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(job_dir(settings), debug)
def request_seen(self, request):
fp = self.request_fingerprint(request)
if fp in self.fingerprints:
return True
self.fingerprints.add(fp)
if self.file:
self.file.write(fp + os.linesep)
def request_fingerprint(self, request):
return request_fingerprint(request)
def close(self, reason):
if self.file:
self.file.close()
def log(self, request, spider):
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request: %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
spider.crawler.stats.inc_value('dupefilter/filtered', spider=spider)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-28-fixed/scrapy/scrapy/dupefilters.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-28-buggy/scrapy/scrapy/dupefilters.py |
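An in-memory sketch (no job directory, so nothing is persisted) of the fingerprint deduplication implemented by RFPDupeFilter above; request_fingerprint() canonicalizes the URL, so variants such as fragment-only differences typically map to the same fingerprint.

from scrapy.http import Request
from scrapy.dupefilters import RFPDupeFilter

df = RFPDupeFilter()                     # path=None -> purely in-memory
first = df.request_seen(Request("http://example.com/page"))
again = df.request_seen(Request("http://example.com/page#frag"))
print(first, again)                      # None True -- the second request is filtered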
scrapy-bug-24 | """Download handlers for http and https schemes"""
import re
import logging
from io import BytesIO
from time import time
from six.moves.urllib.parse import urldefrag
from zope.interface import implementer
from twisted.internet import defer, reactor, protocol
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import IBodyProducer, UNKNOWN_LENGTH
from twisted.internet.error import TimeoutError
from twisted.web.http import PotentialDataLoss
from scrapy.xlib.tx import Agent, ProxyAgent, ResponseDone, \
HTTPConnectionPool, TCP4ClientEndpoint
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from scrapy.core.downloader.webclient import _parse
from scrapy.utils.misc import load_object
from scrapy.utils.python import to_bytes, to_unicode
from scrapy import twisted_version
logger = logging.getLogger(__name__)
class HTTP11DownloadHandler(object):
def __init__(self, settings):
self._pool = HTTPConnectionPool(reactor, persistent=True)
self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self._pool._factory.noisy = False
self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
self._contextFactory = self._contextFactoryClass()
self._default_maxsize = settings.getint('DOWNLOAD_MAXSIZE')
self._default_warnsize = settings.getint('DOWNLOAD_WARNSIZE')
self._disconnect_timeout = 1
def download_request(self, request, spider):
"""Return a deferred for the HTTP download"""
agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
warnsize=getattr(spider, 'download_warnsize', self._default_warnsize))
return agent.download_request(request)
def close(self):
d = self._pool.closeCachedConnections()
# closeCachedConnections will hang on network or server issues, so
# we'll manually timeout the deferred.
#
# Twisted issue addressing this problem can be found here:
# https://twistedmatrix.com/trac/ticket/7738.
#
# closeCachedConnections doesn't handle external errbacks, so we'll
# issue a callback after `_disconnect_timeout` seconds.
delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])
def cancel_delayed_call(result):
if delayed_call.active():
delayed_call.cancel()
return result
d.addBoth(cancel_delayed_call)
return d
class TunnelError(Exception):
"""An HTTP CONNECT tunnel could not be established by the proxy."""
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
"""An endpoint that tunnels through proxies to allow HTTPS downloads. To
accomplish that, this endpoint sends an HTTP CONNECT to the proxy.
    The HTTP CONNECT is always sent when using this endpoint. I think this could
be improved as the CONNECT will be redundant if the connection associated
with this endpoint comes from the pool and a CONNECT has already been issued
for it.
"""
_responseMatcher = re.compile('HTTP/1\.. 200')
def __init__(self, reactor, host, port, proxyConf, contextFactory,
timeout=30, bindAddress=None):
proxyHost, proxyPort, self._proxyAuthHeader = proxyConf
super(TunnelingTCP4ClientEndpoint, self).__init__(reactor, proxyHost,
proxyPort, timeout, bindAddress)
self._tunnelReadyDeferred = defer.Deferred()
self._tunneledHost = host
self._tunneledPort = port
self._contextFactory = contextFactory
def requestTunnel(self, protocol):
"""Asks the proxy to open a tunnel."""
tunnelReq = 'CONNECT %s:%s HTTP/1.1\r\n' % (self._tunneledHost,
self._tunneledPort)
if self._proxyAuthHeader:
tunnelReq += 'Proxy-Authorization: %s\r\n' % self._proxyAuthHeader
tunnelReq += '\r\n'
protocol.transport.write(tunnelReq)
self._protocolDataReceived = protocol.dataReceived
protocol.dataReceived = self.processProxyResponse
self._protocol = protocol
return protocol
def processProxyResponse(self, bytes):
"""Processes the response from the proxy. If the tunnel is successfully
created, notifies the client that we are ready to send requests. If not
raises a TunnelError.
"""
self._protocol.dataReceived = self._protocolDataReceived
if TunnelingTCP4ClientEndpoint._responseMatcher.match(bytes):
self._protocol.transport.startTLS(self._contextFactory,
self._protocolFactory)
self._tunnelReadyDeferred.callback(self._protocol)
else:
self._tunnelReadyDeferred.errback(
TunnelError('Could not open CONNECT tunnel.'))
def connectFailed(self, reason):
"""Propagates the errback to the appropriate deferred."""
self._tunnelReadyDeferred.errback(reason)
def connect(self, protocolFactory):
self._protocolFactory = protocolFactory
connectDeferred = super(TunnelingTCP4ClientEndpoint,
self).connect(protocolFactory)
connectDeferred.addCallback(self.requestTunnel)
connectDeferred.addErrback(self.connectFailed)
return self._tunnelReadyDeferred
class TunnelingAgent(Agent):
"""An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS
downloads. It may look strange that we have chosen to subclass Agent and not
ProxyAgent but consider that after the tunnel is opened the proxy is
transparent to the client; thus the agent should behave like there is no
proxy involved.
"""
def __init__(self, reactor, proxyConf, contextFactory=None,
connectTimeout=None, bindAddress=None, pool=None):
super(TunnelingAgent, self).__init__(reactor, contextFactory,
connectTimeout, bindAddress, pool)
self._proxyConf = proxyConf
self._contextFactory = contextFactory
if twisted_version >= (15, 0, 0):
def _getEndpoint(self, uri):
return TunnelingTCP4ClientEndpoint(
self._reactor, uri.host, uri.port, self._proxyConf,
self._contextFactory, self._endpointFactory._connectTimeout,
self._endpointFactory._bindAddress)
else:
def _getEndpoint(self, scheme, host, port):
return TunnelingTCP4ClientEndpoint(
self._reactor, host, port, self._proxyConf,
self._contextFactory, self._connectTimeout,
self._bindAddress)
class ScrapyAgent(object):
_Agent = Agent
_ProxyAgent = ProxyAgent
_TunnelingAgent = TunnelingAgent
def __init__(self, contextFactory=None, connectTimeout=10, bindAddress=None, pool=None,
maxsize=0, warnsize=0):
self._contextFactory = contextFactory
self._connectTimeout = connectTimeout
self._bindAddress = bindAddress
self._pool = pool
self._maxsize = maxsize
self._warnsize = warnsize
def _get_agent(self, request, timeout):
bindaddress = request.meta.get('bindaddress') or self._bindAddress
proxy = request.meta.get('proxy')
if proxy:
_, _, proxyHost, proxyPort, proxyParams = _parse(proxy)
scheme = _parse(request.url)[0]
proxyHost = to_unicode(proxyHost)
omitConnectTunnel = proxyParams.find(b'noconnect') >= 0
if scheme == b'https' and not omitConnectTunnel:
proxyConf = (proxyHost, proxyPort,
request.headers.get(b'Proxy-Authorization', None))
return self._TunnelingAgent(reactor, proxyConf,
contextFactory=self._contextFactory, connectTimeout=timeout,
bindAddress=bindaddress, pool=self._pool)
else:
endpoint = TCP4ClientEndpoint(reactor, proxyHost, proxyPort,
timeout=timeout, bindAddress=bindaddress)
return self._ProxyAgent(endpoint)
return self._Agent(reactor, contextFactory=self._contextFactory,
connectTimeout=timeout, bindAddress=bindaddress, pool=self._pool)
def download_request(self, request):
timeout = request.meta.get('download_timeout') or self._connectTimeout
agent = self._get_agent(request, timeout)
# request details
url = to_bytes(urldefrag(request.url)[0])
method = to_bytes(request.method)
headers = TxHeaders(request.headers)
if isinstance(agent, self._TunnelingAgent):
headers.removeHeader(b'Proxy-Authorization')
bodyproducer = _RequestBodyProducer(request.body) if request.body else None
start_time = time()
d = agent.request(method, url, headers, bodyproducer)
# set download latency
d.addCallback(self._cb_latency, request, start_time)
# response body is ready to be consumed
d.addCallback(self._cb_bodyready, request)
d.addCallback(self._cb_bodydone, request, url)
# check download timeout
self._timeout_cl = reactor.callLater(timeout, d.cancel)
d.addBoth(self._cb_timeout, request, url, timeout)
return d
def _cb_timeout(self, result, request, url, timeout):
if self._timeout_cl.active():
self._timeout_cl.cancel()
return result
raise TimeoutError("Getting %s took longer than %s seconds." % (url, timeout))
def _cb_latency(self, result, request, start_time):
request.meta['download_latency'] = time() - start_time
return result
def _cb_bodyready(self, txresponse, request):
# deliverBody hangs for responses without body
if txresponse.length == 0:
return txresponse, b'', None
maxsize = request.meta.get('download_maxsize', self._maxsize)
warnsize = request.meta.get('download_warnsize', self._warnsize)
expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
if maxsize and expected_size > maxsize:
logger.error("Expected response size (%(size)s) larger than "
"download max size (%(maxsize)s).",
{'size': expected_size, 'maxsize': maxsize})
txresponse._transport._producer.loseConnection()
raise defer.CancelledError()
if warnsize and expected_size > warnsize:
logger.warning("Expected response size (%(size)s) larger than "
"download warn size (%(warnsize)s).",
{'size': expected_size, 'warnsize': warnsize})
def _cancel(_):
txresponse._transport._producer.loseConnection()
d = defer.Deferred(_cancel)
txresponse.deliverBody(_ResponseReader(d, txresponse, request, maxsize, warnsize))
return d
def _cb_bodydone(self, result, request, url):
txresponse, body, flags = result
status = int(txresponse.code)
headers = Headers(txresponse.headers.getAllRawHeaders())
url = to_unicode(url)
respcls = responsetypes.from_args(headers=headers, url=url)
return respcls(
url=url, status=status, headers=headers, body=body, flags=flags)
@implementer(IBodyProducer)
class _RequestBodyProducer(object):
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class _ResponseReader(protocol.Protocol):
def __init__(self, finished, txresponse, request, maxsize, warnsize):
self._finished = finished
self._txresponse = txresponse
self._request = request
self._bodybuf = BytesIO()
self._maxsize = maxsize
self._warnsize = warnsize
self._bytes_received = 0
def dataReceived(self, bodyBytes):
self._bodybuf.write(bodyBytes)
self._bytes_received += len(bodyBytes)
if self._maxsize and self._bytes_received > self._maxsize:
logger.error("Received (%(bytes)s) bytes larger than download "
"max size (%(maxsize)s).",
{'bytes': self._bytes_received,
'maxsize': self._maxsize})
self._finished.cancel()
if self._warnsize and self._bytes_received > self._warnsize:
logger.warning("Received (%(bytes)s) bytes larger than download "
"warn size (%(warnsize)s).",
{'bytes': self._bytes_received,
'warnsize': self._warnsize})
def connectionLost(self, reason):
if self._finished.called:
return
body = self._bodybuf.getvalue()
if reason.check(ResponseDone):
self._finished.callback((self._txresponse, body, None))
elif reason.check(PotentialDataLoss):
self._finished.callback((self._txresponse, body, ['partial']))
else:
self._finished.errback(reason)
"""Download handlers for http and https schemes"""
import re
import logging
from io import BytesIO
from time import time
from six.moves.urllib.parse import urldefrag
from zope.interface import implementer
from twisted.internet import defer, reactor, protocol
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import IBodyProducer, UNKNOWN_LENGTH
from twisted.internet.error import TimeoutError
from twisted.web.http import PotentialDataLoss
from scrapy.xlib.tx import Agent, ProxyAgent, ResponseDone, \
HTTPConnectionPool, TCP4ClientEndpoint
from scrapy.http import Headers
from scrapy.responsetypes import responsetypes
from scrapy.core.downloader.webclient import _parse
from scrapy.utils.misc import load_object
from scrapy.utils.python import to_bytes, to_unicode
from scrapy import twisted_version
logger = logging.getLogger(__name__)
class HTTP11DownloadHandler(object):
def __init__(self, settings):
self._pool = HTTPConnectionPool(reactor, persistent=True)
self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self._pool._factory.noisy = False
self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
self._contextFactory = self._contextFactoryClass()
self._default_maxsize = settings.getint('DOWNLOAD_MAXSIZE')
self._default_warnsize = settings.getint('DOWNLOAD_WARNSIZE')
self._disconnect_timeout = 1
def download_request(self, request, spider):
"""Return a deferred for the HTTP download"""
agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
warnsize=getattr(spider, 'download_warnsize', self._default_warnsize))
return agent.download_request(request)
def close(self):
d = self._pool.closeCachedConnections()
# closeCachedConnections will hang on network or server issues, so
# we'll manually timeout the deferred.
#
# Twisted issue addressing this problem can be found here:
# https://twistedmatrix.com/trac/ticket/7738.
#
# closeCachedConnections doesn't handle external errbacks, so we'll
# issue a callback after `_disconnect_timeout` seconds.
delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])
def cancel_delayed_call(result):
if delayed_call.active():
delayed_call.cancel()
return result
d.addBoth(cancel_delayed_call)
return d
class TunnelError(Exception):
"""An HTTP CONNECT tunnel could not be established by the proxy."""
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
"""An endpoint that tunnels through proxies to allow HTTPS downloads. To
accomplish that, this endpoint sends an HTTP CONNECT to the proxy.
    The HTTP CONNECT is always sent when using this endpoint. I think this could
be improved as the CONNECT will be redundant if the connection associated
with this endpoint comes from the pool and a CONNECT has already been issued
for it.
"""
_responseMatcher = re.compile(b'HTTP/1\.. 200')
def __init__(self, reactor, host, port, proxyConf, contextFactory,
timeout=30, bindAddress=None):
proxyHost, proxyPort, self._proxyAuthHeader = proxyConf
super(TunnelingTCP4ClientEndpoint, self).__init__(reactor, proxyHost,
proxyPort, timeout, bindAddress)
self._tunnelReadyDeferred = defer.Deferred()
self._tunneledHost = host
self._tunneledPort = port
self._contextFactory = contextFactory
def requestTunnel(self, protocol):
"""Asks the proxy to open a tunnel."""
tunnelReq = (
b'CONNECT ' +
to_bytes(self._tunneledHost, encoding='ascii') + b':' +
to_bytes(str(self._tunneledPort)) +
b' HTTP/1.1\r\n')
if self._proxyAuthHeader:
tunnelReq += \
b'Proxy-Authorization: ' + self._proxyAuthHeader + b'\r\n'
tunnelReq += b'\r\n'
protocol.transport.write(tunnelReq)
self._protocolDataReceived = protocol.dataReceived
protocol.dataReceived = self.processProxyResponse
self._protocol = protocol
return protocol
def processProxyResponse(self, bytes):
"""Processes the response from the proxy. If the tunnel is successfully
created, notifies the client that we are ready to send requests. If not
raises a TunnelError.
"""
self._protocol.dataReceived = self._protocolDataReceived
if TunnelingTCP4ClientEndpoint._responseMatcher.match(bytes):
self._protocol.transport.startTLS(self._contextFactory,
self._protocolFactory)
self._tunnelReadyDeferred.callback(self._protocol)
else:
self._tunnelReadyDeferred.errback(
TunnelError('Could not open CONNECT tunnel.'))
def connectFailed(self, reason):
"""Propagates the errback to the appropriate deferred."""
self._tunnelReadyDeferred.errback(reason)
def connect(self, protocolFactory):
self._protocolFactory = protocolFactory
connectDeferred = super(TunnelingTCP4ClientEndpoint,
self).connect(protocolFactory)
connectDeferred.addCallback(self.requestTunnel)
connectDeferred.addErrback(self.connectFailed)
return self._tunnelReadyDeferred
class TunnelingAgent(Agent):
"""An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS
downloads. It may look strange that we have chosen to subclass Agent and not
ProxyAgent but consider that after the tunnel is opened the proxy is
transparent to the client; thus the agent should behave like there is no
proxy involved.
"""
def __init__(self, reactor, proxyConf, contextFactory=None,
connectTimeout=None, bindAddress=None, pool=None):
super(TunnelingAgent, self).__init__(reactor, contextFactory,
connectTimeout, bindAddress, pool)
self._proxyConf = proxyConf
self._contextFactory = contextFactory
if twisted_version >= (15, 0, 0):
def _getEndpoint(self, uri):
return TunnelingTCP4ClientEndpoint(
self._reactor, uri.host, uri.port, self._proxyConf,
self._contextFactory, self._endpointFactory._connectTimeout,
self._endpointFactory._bindAddress)
else:
def _getEndpoint(self, scheme, host, port):
return TunnelingTCP4ClientEndpoint(
self._reactor, host, port, self._proxyConf,
self._contextFactory, self._connectTimeout,
self._bindAddress)
class ScrapyAgent(object):
_Agent = Agent
_ProxyAgent = ProxyAgent
_TunnelingAgent = TunnelingAgent
def __init__(self, contextFactory=None, connectTimeout=10, bindAddress=None, pool=None,
maxsize=0, warnsize=0):
self._contextFactory = contextFactory
self._connectTimeout = connectTimeout
self._bindAddress = bindAddress
self._pool = pool
self._maxsize = maxsize
self._warnsize = warnsize
def _get_agent(self, request, timeout):
bindaddress = request.meta.get('bindaddress') or self._bindAddress
proxy = request.meta.get('proxy')
if proxy:
_, _, proxyHost, proxyPort, proxyParams = _parse(proxy)
scheme = _parse(request.url)[0]
proxyHost = to_unicode(proxyHost)
omitConnectTunnel = proxyParams.find(b'noconnect') >= 0
if scheme == b'https' and not omitConnectTunnel:
proxyConf = (proxyHost, proxyPort,
request.headers.get(b'Proxy-Authorization', None))
return self._TunnelingAgent(reactor, proxyConf,
contextFactory=self._contextFactory, connectTimeout=timeout,
bindAddress=bindaddress, pool=self._pool)
else:
endpoint = TCP4ClientEndpoint(reactor, proxyHost, proxyPort,
timeout=timeout, bindAddress=bindaddress)
return self._ProxyAgent(endpoint)
return self._Agent(reactor, contextFactory=self._contextFactory,
connectTimeout=timeout, bindAddress=bindaddress, pool=self._pool)
def download_request(self, request):
timeout = request.meta.get('download_timeout') or self._connectTimeout
agent = self._get_agent(request, timeout)
# request details
url = to_bytes(urldefrag(request.url)[0])
method = to_bytes(request.method)
headers = TxHeaders(request.headers)
if isinstance(agent, self._TunnelingAgent):
headers.removeHeader(b'Proxy-Authorization')
bodyproducer = _RequestBodyProducer(request.body) if request.body else None
start_time = time()
d = agent.request(method, url, headers, bodyproducer)
# set download latency
d.addCallback(self._cb_latency, request, start_time)
# response body is ready to be consumed
d.addCallback(self._cb_bodyready, request)
d.addCallback(self._cb_bodydone, request, url)
# check download timeout
self._timeout_cl = reactor.callLater(timeout, d.cancel)
d.addBoth(self._cb_timeout, request, url, timeout)
return d
def _cb_timeout(self, result, request, url, timeout):
if self._timeout_cl.active():
self._timeout_cl.cancel()
return result
raise TimeoutError("Getting %s took longer than %s seconds." % (url, timeout))
def _cb_latency(self, result, request, start_time):
request.meta['download_latency'] = time() - start_time
return result
def _cb_bodyready(self, txresponse, request):
# deliverBody hangs for responses without body
if txresponse.length == 0:
return txresponse, b'', None
maxsize = request.meta.get('download_maxsize', self._maxsize)
warnsize = request.meta.get('download_warnsize', self._warnsize)
expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
if maxsize and expected_size > maxsize:
logger.error("Expected response size (%(size)s) larger than "
"download max size (%(maxsize)s).",
{'size': expected_size, 'maxsize': maxsize})
txresponse._transport._producer.loseConnection()
raise defer.CancelledError()
if warnsize and expected_size > warnsize:
logger.warning("Expected response size (%(size)s) larger than "
"download warn size (%(warnsize)s).",
{'size': expected_size, 'warnsize': warnsize})
def _cancel(_):
txresponse._transport._producer.loseConnection()
d = defer.Deferred(_cancel)
txresponse.deliverBody(_ResponseReader(d, txresponse, request, maxsize, warnsize))
return d
def _cb_bodydone(self, result, request, url):
txresponse, body, flags = result
status = int(txresponse.code)
headers = Headers(txresponse.headers.getAllRawHeaders())
url = to_unicode(url)
respcls = responsetypes.from_args(headers=headers, url=url)
return respcls(
url=url, status=status, headers=headers, body=body, flags=flags)
@implementer(IBodyProducer)
class _RequestBodyProducer(object):
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class _ResponseReader(protocol.Protocol):
def __init__(self, finished, txresponse, request, maxsize, warnsize):
self._finished = finished
self._txresponse = txresponse
self._request = request
self._bodybuf = BytesIO()
self._maxsize = maxsize
self._warnsize = warnsize
self._bytes_received = 0
def dataReceived(self, bodyBytes):
self._bodybuf.write(bodyBytes)
self._bytes_received += len(bodyBytes)
if self._maxsize and self._bytes_received > self._maxsize:
logger.error("Received (%(bytes)s) bytes larger than download "
"max size (%(maxsize)s).",
{'bytes': self._bytes_received,
'maxsize': self._maxsize})
self._finished.cancel()
if self._warnsize and self._bytes_received > self._warnsize:
logger.warning("Received (%(bytes)s) bytes larger than download "
"warn size (%(warnsize)s).",
{'bytes': self._bytes_received,
'warnsize': self._warnsize})
def connectionLost(self, reason):
if self._finished.called:
return
body = self._bodybuf.getvalue()
if reason.check(ResponseDone):
self._finished.callback((self._txresponse, body, None))
elif reason.check(PotentialDataLoss):
self._finished.callback((self._txresponse, body, ['partial']))
else:
self._finished.errback(reason)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-24-fixed/scrapy/scrapy/core/downloader/handlers/http11.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-24-buggy/scrapy/scrapy/core/downloader/handlers/http11.py |
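A standalone sketch (the helper name is illustrative) of the Python 3 issue the second copy above fixes: the CONNECT handshake is written to a Twisted transport, which deals in bytes, so both the request and the status-line matcher must be bytes rather than str.

import re

_response_matcher = re.compile(b'HTTP/1\\.. 200')   # bytes pattern, as in the fix

def build_connect_request(host, port, proxy_auth_header=None):
    # Mirrors requestTunnel() above: assemble the CONNECT request as bytes.
    req = (b'CONNECT ' + host.encode('ascii') + b':' +
           str(port).encode('ascii') + b' HTTP/1.1\r\n')
    if proxy_auth_header:
        req += b'Proxy-Authorization: ' + proxy_auth_header + b'\r\n'
    return req + b'\r\n'

print(build_connect_request("example.com", 443))
print(bool(_response_matcher.match(b'HTTP/1.1 200 Connection established')))  # True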
scrapy-bug-35 | import six
import signal
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy import log, signals
class Crawler(object):
def __init__(self, spidercls, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.spidercls.update_settings(self.settings)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
def __init__(self, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self.crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
self._setup_crawler_logging(crawler)
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def _setup_crawler_logging(self, crawler):
log_observer = log.start_from_crawler(crawler)
if log_observer:
crawler.signals.connect(log_observer.stop, signals.engine_stopped)
def stop(self):
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
@defer.inlineCallbacks
def join(self):
"""Wait for all managed crawlers to complete"""
while self._active:
yield defer.DeferredList(self._active)
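# Usage sketch (QuotesSpider is a hypothetical spider added only for
# illustration): schedule a crawl with CrawlerRunner, then stop the reactor
# once join() reports that all managed crawls have finished.
import scrapy
class QuotesSpider(scrapy.Spider):
    name = 'quotes'
    start_urls = ['http://example.com']
    def parse(self, response):
        pass
def _run_with_runner():
    runner = CrawlerRunner(Settings())
    runner.crawl(QuotesSpider)
    runner.join().addBoth(lambda _: reactor.stop())
    reactor.run()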
class CrawlerProcess(CrawlerRunner):
"""A class to run multiple scrapy crawlers in a process simultaneously"""
def __init__(self, settings):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
self.stopping = False
self.log_observer = log.start_from_settings(self.settings)
log.scrapy_info(settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
log.msg(format="Received %(signame)s, shutting down gracefully. Send again to force ",
level=log.INFO, signame=signame)
reactor.callFromThread(self.stop)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
log.msg(format='Received %(signame)s twice, forcing unclean shutdown',
level=log.INFO, signame=signame)
self._stop_logging()
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(lambda _: self._stop_reactor())
cache_size = self.settings.getint('DNSCACHE_SIZE') if self.settings.getbool('DNSCACHE_ENABLED') else 0
reactor.installResolver(CachingThreadedResolver(reactor, cache_size,
self.settings.getfloat('DNS_TIMEOUT')))
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _stop_logging(self):
if self.log_observer:
self.log_observer.stop()
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_LOADER_CLASS',
settings.get('SPIDER_MANAGER_CLASS'))
loader_cls = load_object(cls_path)
verifyClass(ISpiderLoader, loader_cls)
return loader_cls.from_settings(settings.frozencopy())
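# Usage sketch: CrawlerProcess installs its own shutdown handlers and drives
# the reactor itself, so a standalone script only needs crawl() and start().
# QuotesSpider is again the hypothetical spider class sketched above.
def _run_with_process():
    process = CrawlerProcess({'BOT_NAME': 'demo'})
    process.crawl(QuotesSpider)
    process.start()  # blocking call; returns once crawling is finished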
import six
import signal
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy import log, signals
class Crawler(object):
def __init__(self, spidercls, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.spidercls.update_settings(self.settings)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
def __init__(self, settings):
if isinstance(settings, dict):
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self.crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
self._setup_crawler_logging(crawler)
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def _setup_crawler_logging(self, crawler):
log_observer = log.start_from_crawler(crawler)
if log_observer:
crawler.signals.connect(log_observer.stop, signals.engine_stopped)
def stop(self):
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
@defer.inlineCallbacks
def join(self):
"""Wait for all managed crawlers to complete"""
while self._active:
yield defer.DeferredList(self._active)
class CrawlerProcess(CrawlerRunner):
"""A class to run multiple scrapy crawlers in a process simultaneously"""
def __init__(self, settings):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
self.stopping = False
self.log_observer = log.start_from_settings(self.settings)
log.scrapy_info(settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
log.msg(format="Received %(signame)s, shutting down gracefully. Send again to force ",
level=log.INFO, signame=signame)
reactor.callFromThread(self.stop)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
log.msg(format='Received %(signame)s twice, forcing unclean shutdown',
level=log.INFO, signame=signame)
self._stop_logging()
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(lambda _: self._stop_reactor())
cache_size = self.settings.getint('DNSCACHE_SIZE') if self.settings.getbool('DNSCACHE_ENABLED') else 0
reactor.installResolver(CachingThreadedResolver(reactor, cache_size,
self.settings.getfloat('DNS_TIMEOUT')))
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _stop_logging(self):
if self.log_observer:
self.log_observer.stop()
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_MANAGER_CLASS',
settings.get('SPIDER_LOADER_CLASS'))
loader_cls = load_object(cls_path)
verifyClass(ISpiderLoader, loader_cls)
return loader_cls.from_settings(settings.frozencopy())
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-35-fixed/scrapy/scrapy/crawler.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-35-buggy/scrapy/scrapy/crawler.py |
scrapy-bug-3 | import logging
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \
[reason]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
{'reason': reason, 'redirected': redirected, 'request': request},
extra={'spider': spider})
return redirected
else:
logger.debug("Discarding %(request)s: max redirections reached",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest("max redirections reached")
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
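# Sketch of _redirect_request_using_get above (illustrative module-level
# code; names prefixed with _ are hypothetical): a redirect answered with a
# GET drops the request body and its entity headers.
from scrapy import Request as _Request
_orig = _Request('http://example.com/form', method='POST', body='a=1',
                 headers={'Content-Type': 'application/x-www-form-urlencoded'})
_redirected = _orig.replace(url='http://example.com/done', method='GET', body='')
_redirected.headers.pop('Content-Type', None)
assert _redirected.method == 'GET' and not _redirected.body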
class RedirectMiddleware(BaseRedirectMiddleware):
"""
Handle redirection of requests based on response status
and meta-refresh html tag.
"""
def process_response(self, request, response, spider):
if (request.meta.get('dont_redirect', False) or
response.status in getattr(spider, 'handle_httpstatus_list', []) or
response.status in request.meta.get('handle_httpstatus_list', []) or
request.meta.get('handle_httpstatus_all', False)):
return response
allowed_status = (301, 302, 303, 307, 308)
if 'Location' not in response.headers or response.status not in allowed_status:
return response
location = safe_url_string(response.headers['location'])
redirected_url = urljoin(request.url, location)
if response.status in (301, 307, 308) or request.method == 'HEAD':
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
interval, url = get_meta_refresh(response,
ignore_tags=self._ignore_tags)
if url and interval < self._maxdelay:
redirected = self._redirect_request_using_get(request, url)
return self._redirect(redirected, request, spider, 'meta refresh')
return response
import logging
from six.moves.urllib.parse import urljoin, urlparse
from w3lib.url import safe_url_string
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \
[reason]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
{'reason': reason, 'redirected': redirected, 'request': request},
extra={'spider': spider})
return redirected
else:
logger.debug("Discarding %(request)s: max redirections reached",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest("max redirections reached")
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
"""
Handle redirection of requests based on response status
and meta-refresh html tag.
"""
def process_response(self, request, response, spider):
if (request.meta.get('dont_redirect', False) or
response.status in getattr(spider, 'handle_httpstatus_list', []) or
response.status in request.meta.get('handle_httpstatus_list', []) or
request.meta.get('handle_httpstatus_all', False)):
return response
allowed_status = (301, 302, 303, 307, 308)
if 'Location' not in response.headers or response.status not in allowed_status:
return response
location = safe_url_string(response.headers['Location'])
if response.headers['Location'].startswith(b'//'):
request_scheme = urlparse(request.url).scheme
location = request_scheme + '://' + location.lstrip('/')
redirected_url = urljoin(request.url, location)
if response.status in (301, 307, 308) or request.method == 'HEAD':
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
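# Sketch of the protocol-relative fix above: a Location of b'//example.com/x'
# on an https request resolves against the request's own scheme.
_loc = safe_url_string(b'//example.com/x')
_scheme = urlparse('https://start.example/a').scheme
assert _scheme + '://' + _loc.lstrip('/') == 'https://example.com/x'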
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
interval, url = get_meta_refresh(response,
ignore_tags=self._ignore_tags)
if url and interval < self._maxdelay:
redirected = self._redirect_request_using_get(request, url)
return self._redirect(redirected, request, spider, 'meta refresh')
return response
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-3-fixed/scrapy/scrapy/downloadermiddlewares/redirect.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-3-buggy/scrapy/scrapy/downloadermiddlewares/redirect.py |
scrapy-bug-15 | """
This module contains general purpose URL functions not found in the standard
library.
Some of the functions that used to be imported from this module have been moved
to the w3lib.url module. Always import those from there instead.
"""
import posixpath
import re
import six
from six.moves.urllib.parse import (ParseResult, urlunparse, urldefrag,
urlparse, parse_qsl, urlencode,
quote, unquote)
if not six.PY2:
from urllib.parse import unquote_to_bytes
# scrapy.utils.url was moved to w3lib.url and import * ensures this
# move doesn't break old code
from w3lib.url import *
from w3lib.url import _safe_chars
from scrapy.utils.python import to_bytes, to_native_str, to_unicode
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if not host:
return False
domains = [d.lower() for d in domains]
return any((host == d) or (host.endswith('.%s' % d)) for d in domains)
def url_is_from_spider(url, spider):
"""Return True if the url belongs to the given spider"""
return url_is_from_any_domain(url,
[spider.name] + list(getattr(spider, 'allowed_domains', [])))
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
return (
to_native_str(parts.scheme),
to_native_str(parts.netloc.encode('idna')),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
quote(to_bytes(parts.params, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars)
)
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
encoding=None):
"""Canonicalize the given url by applying the following procedures:
- sort query arguments, first by key, then by value
    - percent encode paths; non-ASCII characters are percent-encoded
    using UTF-8 (RFC-3986)
    - percent encode query arguments; non-ASCII characters are percent-encoded
    using the passed `encoding` (UTF-8 by default)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
- normalize percent encodings case (%2f -> %2F)
- remove query arguments with blank values (unless `keep_blank_values` is True)
- remove fragments (unless `keep_fragments` is True)
The url passed can be bytes or unicode, while the url returned is
always a native str (bytes in Python 2, unicode in Python 3).
For examples see the tests in tests/test_utils_url.py
"""
# If supplied `encoding` is not compatible with all characters in `url`,
# fallback to UTF-8 as safety net.
# UTF-8 can handle all Unicode characters,
# so we should be covered regarding URL normalization,
# if not for proper URL expected by remote website.
try:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding=encoding)
except UnicodeEncodeError as e:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding='utf8')
# 1. decode query-string as UTF-8 (or keep raw bytes),
# sort values,
# and percent-encode them back
if six.PY2:
keyvals = parse_qsl(query, keep_blank_values)
else:
        # Python3's urllib.parse.parse_qsl does not work as wanted
        # for percent-encoded characters that do not match the passed
        # encoding; they get lost.
        #
        # e.g., 'q=b%a3' becomes [('q', 'b\ufffd')]
        # (i.e. with 'REPLACEMENT CHARACTER' (U+FFFD),
        # instead of the \xa3 that you get with Python2's parse_qsl)
        #
        # what we want here is to keep raw bytes, and percent-encode them
        # so as to preserve whatever encoding was originally used.
#
# See https://tools.ietf.org/html/rfc3987#section-6.4:
#
# For example, it is possible to have a URI reference of
# "http://www.example.org/r%E9sum%E9.xml#r%C3%A9sum%C3%A9", where the
# document name is encoded in iso-8859-1 based on server settings, but
# where the fragment identifier is encoded in UTF-8 according to
# [XPointer]. The IRI corresponding to the above URI would be (in XML
# notation)
# "http://www.example.org/r%E9sum%E9.xml#résumé".
# Similar considerations apply to query parts. The functionality of
# IRIs (namely, to be able to include non-ASCII characters) can only be
# used if the query part is encoded in UTF-8.
keyvals = parse_qsl_to_bytes(query, keep_blank_values)
keyvals.sort()
query = urlencode(keyvals)
# 2. decode percent-encoded sequences in path as UTF-8 (or keep raw bytes)
# and percent-encode path again (this normalizes to upper-case %XX)
uqp = _unquotepath(path)
path = quote(uqp, _safe_chars) or '/'
fragment = '' if not keep_fragments else fragment
# every part should be safe already
return urlunparse((scheme, netloc.lower(), path, params, query, fragment))
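# Usage sketch: query arguments are sorted first by key, then by value;
# blank values are kept by default.
assert canonicalize_url('http://example.com/do?b=2&a=1&a=') == \
    'http://example.com/do?a=&a=1&b=2'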
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
if six.PY2:
# in Python 2, '%a3' becomes '\xa3', which is what we want
return unquote(path)
else:
# in Python 3,
# standard lib's unquote() does not work for non-UTF-8
# percent-escaped characters, they get lost.
# e.g., '%a3' becomes 'REPLACEMENT CHARACTER' (U+FFFD)
#
# unquote_to_bytes() returns raw bytes instead
return unquote_to_bytes(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
if isinstance(url, ParseResult):
return url
return urlparse(to_unicode(url, encoding))
if not six.PY2:
from urllib.parse import _coerce_args, unquote_to_bytes
def parse_qsl_to_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parse a query given as a string argument.
Data are returned as a list of name, value pairs as bytes.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
"""
# This code is the same as Python3's parse_qsl()
# (at https://hg.python.org/cpython/rev/c38ac7ab8d9a)
# except for the unquote(s, encoding, errors) calls replaced
# with unquote_to_bytes(s)
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote_to_bytes(name)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote_to_bytes(value)
value = _coerce_result(value)
r.append((name, value))
return r
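# Sketch of the raw-bytes behavior above: a non-UTF-8 percent-escape
# survives as bytes instead of collapsing to U+FFFD as with
# urllib.parse.parse_qsl.
if not six.PY2:
    assert parse_qsl_to_bytes('q=b%a3') == [(b'q', b'b\xa3')]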
def escape_ajax(url):
"""
    Return the crawlable url according to:
http://code.google.com/web/ajaxcrawling/docs/getting-started.html
>>> escape_ajax("www.example.com/ajax.html#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?k1=v1&k2=v2#!key=value")
'www.example.com/ajax.html?k1=v1&k2=v2&_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html#!")
'www.example.com/ajax.html?_escaped_fragment_='
    URLs that are not "AJAX crawlable" (according to Google) are returned as-is:
>>> escape_ajax("www.example.com/ajax.html#key=value")
'www.example.com/ajax.html#key=value'
>>> escape_ajax("www.example.com/ajax.html#")
'www.example.com/ajax.html#'
>>> escape_ajax("www.example.com/ajax.html")
'www.example.com/ajax.html'
"""
defrag, frag = urldefrag(url)
if not frag.startswith('!'):
return url
return add_or_replace_parameter(defrag, '_escaped_fragment_', frag[1:])
def add_http_if_no_scheme(url):
"""Add http as the default scheme if it is missing from the url."""
match = re.match(r"^\w+://", url, flags=re.I)
if not match:
parts = urlparse(url)
scheme = "http:" if parts.netloc else "http://"
url = scheme + url
return url
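# Sketch of the two branches above: protocol-relative input already carries
# a netloc, so only 'http:' is prefixed; bare hostnames get 'http://'.
assert add_http_if_no_scheme('//example.com/x') == 'http://example.com/x'
assert add_http_if_no_scheme('example.com/x') == 'http://example.com/x'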
def guess_scheme(url):
"""Add an URL scheme if missing: file:// for filepath-like input or http:// otherwise."""
parts = urlparse(url)
if parts.scheme:
return url
# Note: this does not match Windows filepath
if re.match(r'''^ # start with...
(
\. # ...a single dot,
(
\. | [^/\.]+ # optionally followed by
)? # either a second dot or some characters
)? # optional match of ".", ".." or ".blabla"
/ # at least one "/" for a file path,
. # and something after the "/"
''', parts.path, flags=re.VERBOSE):
return any_to_uri(url)
else:
return add_http_if_no_scheme(url)
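# Sketch: filepath-like input (leading '.', './' or '../') becomes a
# file:// URI via any_to_uri; anything else falls back to http://.
assert guess_scheme('example.com') == 'http://example.com'
assert guess_scheme('./index.html').startswith('file://')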
"""
This module contains general purpose URL functions not found in the standard
library.
Some of the functions that used to be imported from this module have been moved
to the w3lib.url module. Always import those from there instead.
"""
import posixpath
import re
import six
from six.moves.urllib.parse import (ParseResult, urlunparse, urldefrag,
urlparse, parse_qsl, urlencode,
quote, unquote)
if not six.PY2:
from urllib.parse import unquote_to_bytes
# scrapy.utils.url was moved to w3lib.url and import * ensures this
# move doesn't break old code
from w3lib.url import *
from w3lib.url import _safe_chars
from scrapy.utils.python import to_bytes, to_native_str, to_unicode
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if not host:
return False
domains = [d.lower() for d in domains]
return any((host == d) or (host.endswith('.%s' % d)) for d in domains)
def url_is_from_spider(url, spider):
"""Return True if the url belongs to the given spider"""
return url_is_from_any_domain(url,
[spider.name] + list(getattr(spider, 'allowed_domains', [])))
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
# IDNA encoding can fail for too long labels (>63 characters)
# or missing labels (e.g. http://.example.com)
try:
netloc = parts.netloc.encode('idna')
except UnicodeError:
netloc = parts.netloc
return (
to_native_str(parts.scheme),
to_native_str(netloc),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
quote(to_bytes(parts.params, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars)
)
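# Sketch of the UnicodeError the try/except above tolerates: the idna codec
# rejects empty labels and labels longer than 63 characters.
try:
    u'.example.com'.encode('idna')
except UnicodeError:
    pass  # empty label; the netloc is then kept as-is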
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
encoding=None):
"""Canonicalize the given url by applying the following procedures:
- sort query arguments, first by key, then by value
    - percent encode paths; non-ASCII characters are percent-encoded
    using UTF-8 (RFC-3986)
    - percent encode query arguments; non-ASCII characters are percent-encoded
    using the passed `encoding` (UTF-8 by default)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
- normalize percent encodings case (%2f -> %2F)
- remove query arguments with blank values (unless `keep_blank_values` is True)
- remove fragments (unless `keep_fragments` is True)
The url passed can be bytes or unicode, while the url returned is
always a native str (bytes in Python 2, unicode in Python 3).
For examples see the tests in tests/test_utils_url.py
"""
# If supplied `encoding` is not compatible with all characters in `url`,
# fallback to UTF-8 as safety net.
# UTF-8 can handle all Unicode characters,
# so we should be covered regarding URL normalization,
# if not for proper URL expected by remote website.
try:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding=encoding)
except UnicodeEncodeError as e:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding='utf8')
# 1. decode query-string as UTF-8 (or keep raw bytes),
# sort values,
# and percent-encode them back
if six.PY2:
keyvals = parse_qsl(query, keep_blank_values)
else:
        # Python3's urllib.parse.parse_qsl does not work as wanted
        # for percent-encoded characters that do not match the passed
        # encoding; they get lost.
        #
        # e.g., 'q=b%a3' becomes [('q', 'b\ufffd')]
        # (i.e. with 'REPLACEMENT CHARACTER' (U+FFFD),
        # instead of the \xa3 that you get with Python2's parse_qsl)
        #
        # what we want here is to keep raw bytes, and percent-encode them
        # so as to preserve whatever encoding was originally used.
#
# See https://tools.ietf.org/html/rfc3987#section-6.4:
#
# For example, it is possible to have a URI reference of
# "http://www.example.org/r%E9sum%E9.xml#r%C3%A9sum%C3%A9", where the
# document name is encoded in iso-8859-1 based on server settings, but
# where the fragment identifier is encoded in UTF-8 according to
# [XPointer]. The IRI corresponding to the above URI would be (in XML
# notation)
# "http://www.example.org/r%E9sum%E9.xml#résumé".
# Similar considerations apply to query parts. The functionality of
# IRIs (namely, to be able to include non-ASCII characters) can only be
# used if the query part is encoded in UTF-8.
keyvals = parse_qsl_to_bytes(query, keep_blank_values)
keyvals.sort()
query = urlencode(keyvals)
# 2. decode percent-encoded sequences in path as UTF-8 (or keep raw bytes)
# and percent-encode path again (this normalizes to upper-case %XX)
uqp = _unquotepath(path)
path = quote(uqp, _safe_chars) or '/'
fragment = '' if not keep_fragments else fragment
# every part should be safe already
return urlunparse((scheme, netloc.lower(), path, params, query, fragment))
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
if six.PY2:
# in Python 2, '%a3' becomes '\xa3', which is what we want
return unquote(path)
else:
# in Python 3,
# standard lib's unquote() does not work for non-UTF-8
# percent-escaped characters, they get lost.
# e.g., '%a3' becomes 'REPLACEMENT CHARACTER' (U+FFFD)
#
# unquote_to_bytes() returns raw bytes instead
return unquote_to_bytes(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
if isinstance(url, ParseResult):
return url
return urlparse(to_unicode(url, encoding))
if not six.PY2:
from urllib.parse import _coerce_args, unquote_to_bytes
def parse_qsl_to_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parse a query given as a string argument.
Data are returned as a list of name, value pairs as bytes.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
"""
# This code is the same as Python3's parse_qsl()
# (at https://hg.python.org/cpython/rev/c38ac7ab8d9a)
# except for the unquote(s, encoding, errors) calls replaced
# with unquote_to_bytes(s)
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote_to_bytes(name)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote_to_bytes(value)
value = _coerce_result(value)
r.append((name, value))
return r
def escape_ajax(url):
"""
    Return the crawlable url according to:
http://code.google.com/web/ajaxcrawling/docs/getting-started.html
>>> escape_ajax("www.example.com/ajax.html#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?k1=v1&k2=v2#!key=value")
'www.example.com/ajax.html?k1=v1&k2=v2&_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html#!")
'www.example.com/ajax.html?_escaped_fragment_='
    URLs that are not "AJAX crawlable" (according to Google) are returned as-is:
>>> escape_ajax("www.example.com/ajax.html#key=value")
'www.example.com/ajax.html#key=value'
>>> escape_ajax("www.example.com/ajax.html#")
'www.example.com/ajax.html#'
>>> escape_ajax("www.example.com/ajax.html")
'www.example.com/ajax.html'
"""
defrag, frag = urldefrag(url)
if not frag.startswith('!'):
return url
return add_or_replace_parameter(defrag, '_escaped_fragment_', frag[1:])
def add_http_if_no_scheme(url):
"""Add http as the default scheme if it is missing from the url."""
match = re.match(r"^\w+://", url, flags=re.I)
if not match:
parts = urlparse(url)
scheme = "http:" if parts.netloc else "http://"
url = scheme + url
return url
def guess_scheme(url):
"""Add an URL scheme if missing: file:// for filepath-like input or http:// otherwise."""
parts = urlparse(url)
if parts.scheme:
return url
# Note: this does not match Windows filepath
if re.match(r'''^ # start with...
(
\. # ...a single dot,
(
\. | [^/\.]+ # optionally followed by
)? # either a second dot or some characters
)? # optional match of ".", ".." or ".blabla"
/ # at least one "/" for a file path,
. # and something after the "/"
''', parts.path, flags=re.VERBOSE):
return any_to_uri(url)
else:
return add_http_if_no_scheme(url)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-15-fixed/scrapy/scrapy/utils/url.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-15-buggy/scrapy/scrapy/utils/url.py |
scrapy-bug-31 | import time
from six.moves.http_cookiejar import (
CookieJar as _CookieJar, DefaultCookiePolicy, IPV4_RE
)
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_native_str
class CookieJar(object):
def __init__(self, policy=None, check_expired_frequency=10000):
self.policy = policy or DefaultCookiePolicy()
self.jar = _CookieJar(self.policy)
self.jar._cookies_lock = _DummyLock()
self.check_expired_frequency = check_expired_frequency
self.processed = 0
def extract_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.extract_cookies(wrsp, wreq)
def add_cookie_header(self, request):
wreq = WrappedRequest(request)
self.policy._now = self.jar._now = int(time.time())
# the cookiejar implementation iterates through all domains
# instead we restrict to potential matches on the domain
req_host = urlparse_cached(request).hostname
if not req_host:
return
if not IPV4_RE.search(req_host):
hosts = potential_domain_matches(req_host)
if '.' not in req_host:
hosts += [req_host + ".local"]
else:
hosts = [req_host]
cookies = []
for host in hosts:
if host in self.jar._cookies:
cookies += self.jar._cookies_for_domain(host, wreq)
attrs = self.jar._cookie_attrs(cookies)
if attrs:
if not wreq.has_header("Cookie"):
wreq.add_unredirected_header("Cookie", "; ".join(attrs))
self.processed += 1
if self.processed % self.check_expired_frequency == 0:
# This is still quite inefficient for large number of cookies
self.jar.clear_expired_cookies()
@property
def _cookies(self):
return self.jar._cookies
def clear_session_cookies(self, *args, **kwargs):
return self.jar.clear_session_cookies(*args, **kwargs)
def clear(self):
return self.jar.clear()
def __iter__(self):
return iter(self.jar)
def __len__(self):
return len(self.jar)
def set_policy(self, pol):
return self.jar.set_policy(pol)
def make_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.make_cookies(wrsp, wreq)
def set_cookie(self, cookie):
self.jar.set_cookie(cookie)
def set_cookie_if_ok(self, cookie, request):
self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))
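# Usage sketch (response, request and next_request are assumed scrapy
# objects, not defined here): extract cookies from a response, then let the
# jar set the Cookie header on a follow-up request in place.
def _roundtrip_cookies(response, request, next_request):
    jar = CookieJar()
    jar.extract_cookies(response, request)
    jar.add_cookie_header(next_request)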
def potential_domain_matches(domain):
"""Potential domain matches for a cookie
>>> potential_domain_matches('www.example.com')
['www.example.com', 'example.com', '.www.example.com', '.example.com']
"""
matches = [domain]
try:
start = domain.index('.') + 1
end = domain.rindex('.')
while start < end:
matches.append(domain[start:])
start = domain.index('.', start) + 1
except ValueError:
pass
return matches + ['.' + d for d in matches]
class _DummyLock(object):
def acquire(self):
pass
def release(self):
pass
class WrappedRequest(object):
"""Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class
see http://docs.python.org/library/urllib2.html#urllib2.Request
"""
def __init__(self, request):
self.request = request
def get_full_url(self):
return self.request.url
def get_host(self):
return urlparse_cached(self.request).netloc
def get_type(self):
return urlparse_cached(self.request).scheme
def is_unverifiable(self):
"""Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.
It defaults to False. An unverifiable request is one whose URL the user did not have the
option to approve. For example, if the request is for an image in an
HTML document, and the user had no option to approve the automatic
fetching of the image, this should be true.
"""
return self.request.meta.get('is_unverifiable', False)
# python3 uses request.unverifiable
@property
def unverifiable(self):
return self.is_unverifiable()
def get_origin_req_host(self):
return urlparse_cached(self.request).hostname
def has_header(self, name):
return name in self.request.headers
def get_header(self, name, default=None):
return to_native_str(self.request.headers.get(name, default))
def header_items(self):
return [
(to_native_str(k), [to_native_str(x) for x in v])
for k, v in self.request.headers.items()
]
def add_unredirected_header(self, name, value):
self.request.headers.appendlist(name, value)
class WrappedResponse(object):
def __init__(self, response):
self.response = response
def info(self):
return self
# python3 cookiejars calls get_all
def get_all(self, name, default=None):
return [to_native_str(v) for v in self.response.headers.getlist(name)]
# python2 cookiejars calls getheaders
getheaders = get_all
import time
from six.moves.http_cookiejar import (
CookieJar as _CookieJar, DefaultCookiePolicy, IPV4_RE
)
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_native_str
class CookieJar(object):
def __init__(self, policy=None, check_expired_frequency=10000):
self.policy = policy or DefaultCookiePolicy()
self.jar = _CookieJar(self.policy)
self.jar._cookies_lock = _DummyLock()
self.check_expired_frequency = check_expired_frequency
self.processed = 0
def extract_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.extract_cookies(wrsp, wreq)
def add_cookie_header(self, request):
wreq = WrappedRequest(request)
self.policy._now = self.jar._now = int(time.time())
# the cookiejar implementation iterates through all domains
# instead we restrict to potential matches on the domain
req_host = urlparse_cached(request).hostname
if not req_host:
return
if not IPV4_RE.search(req_host):
hosts = potential_domain_matches(req_host)
if '.' not in req_host:
hosts += [req_host + ".local"]
else:
hosts = [req_host]
cookies = []
for host in hosts:
if host in self.jar._cookies:
cookies += self.jar._cookies_for_domain(host, wreq)
attrs = self.jar._cookie_attrs(cookies)
if attrs:
if not wreq.has_header("Cookie"):
wreq.add_unredirected_header("Cookie", "; ".join(attrs))
self.processed += 1
if self.processed % self.check_expired_frequency == 0:
# This is still quite inefficient for large number of cookies
self.jar.clear_expired_cookies()
@property
def _cookies(self):
return self.jar._cookies
def clear_session_cookies(self, *args, **kwargs):
return self.jar.clear_session_cookies(*args, **kwargs)
def clear(self):
return self.jar.clear()
def __iter__(self):
return iter(self.jar)
def __len__(self):
return len(self.jar)
def set_policy(self, pol):
return self.jar.set_policy(pol)
def make_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.make_cookies(wrsp, wreq)
def set_cookie(self, cookie):
self.jar.set_cookie(cookie)
def set_cookie_if_ok(self, cookie, request):
self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))
def potential_domain_matches(domain):
"""Potential domain matches for a cookie
>>> potential_domain_matches('www.example.com')
['www.example.com', 'example.com', '.www.example.com', '.example.com']
"""
matches = [domain]
try:
start = domain.index('.') + 1
end = domain.rindex('.')
while start < end:
matches.append(domain[start:])
start = domain.index('.', start) + 1
except ValueError:
pass
return matches + ['.' + d for d in matches]
class _DummyLock(object):
def acquire(self):
pass
def release(self):
pass
class WrappedRequest(object):
"""Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class
see http://docs.python.org/library/urllib2.html#urllib2.Request
"""
def __init__(self, request):
self.request = request
def get_full_url(self):
return self.request.url
def get_host(self):
return urlparse_cached(self.request).netloc
def get_type(self):
return urlparse_cached(self.request).scheme
def is_unverifiable(self):
"""Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.
It defaults to False. An unverifiable request is one whose URL the user did not have the
option to approve. For example, if the request is for an image in an
HTML document, and the user had no option to approve the automatic
fetching of the image, this should be true.
"""
return self.request.meta.get('is_unverifiable', False)
# python3 uses request.unverifiable
@property
def unverifiable(self):
return self.is_unverifiable()
def get_origin_req_host(self):
return urlparse_cached(self.request).hostname
def has_header(self, name):
return name in self.request.headers
def get_header(self, name, default=None):
return to_native_str(self.request.headers.get(name, default),
errors='replace')
def header_items(self):
return [
(to_native_str(k, errors='replace'),
[to_native_str(x, errors='replace') for x in v])
for k, v in self.request.headers.items()
]
def add_unredirected_header(self, name, value):
self.request.headers.appendlist(name, value)
class WrappedResponse(object):
def __init__(self, response):
self.response = response
def info(self):
return self
# python3 cookiejars calls get_all
def get_all(self, name, default=None):
return [to_native_str(v, errors='replace')
for v in self.response.headers.getlist(name)]
# python2 cookiejars calls getheaders
getheaders = get_all
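# Sketch (Python 3 semantics) of why the fixed copy decodes with
# errors='replace': a Latin-1 header byte is not valid UTF-8, so a strict
# decode would raise where errors='replace' degrades it to U+FFFD.
assert to_native_str(b'v\xe1l', errors='replace') == u'v\ufffdl'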
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-31-fixed/scrapy/scrapy/http/cookies.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-31-buggy/scrapy/scrapy/http/cookies.py |
scrapy-bug-11 | import struct
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from gzip import GzipFile
import six
import re
# - Python>=3.5 GzipFile's read() has issues returning leftover
# uncompressed data when input is corrupted
# (regression or bug-fix compared to Python 3.4)
# - read1(), which fetches data before raising EOFError on next call
# works here but is only available from Python>=3.3
# - scrapy does not support Python 3.2
# - Python 2.7 GzipFile works fine with standard read() + extrabuf
if six.PY2:
def read1(gzf, size=-1):
return gzf.read(size)
else:
def read1(gzf, size=-1):
return gzf.read1(size)
def gunzip(data):
"""Gunzip the given data and return as much data as possible.
This is resilient to CRC checksum errors.
"""
f = GzipFile(fileobj=BytesIO(data))
output = b''
chunk = b'.'
while chunk:
try:
chunk = read1(f, 8196)
output += chunk
except (IOError, EOFError, struct.error):
# complete only if there is some data, otherwise re-raise
# see issue 87 about catching struct.error
# some pages are quite small so output is '' and f.extrabuf
# contains the whole page content
if output or getattr(f, 'extrabuf', None):
try:
output += f.extrabuf
finally:
break
else:
raise
return output
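# Usage sketch: gunzip() returns whatever decompressed cleanly, even when
# the trailing size word is missing from a truncated stream.
# gzip.compress is Python 3 only; this is an illustrative assumption.
import gzip as _gzip
_blob = _gzip.compress(b'hello world')
assert gunzip(_blob) == b'hello world'
assert gunzip(_blob[:-4]) == b'hello world'  # ISIZE chopped off; data survives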
_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
cenc = response.headers.get('Content-Encoding', b'').lower()
return (_is_gzipped(ctype) or
(_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
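# Sketch of the header checks above: an octet-stream Content-Type only
# counts as gzipped when Content-Encoding says gzip or x-gzip.
assert _is_gzipped(b'application/x-gzip; charset=utf-8')
assert _is_octetstream(b'binary/octet-stream')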
import struct
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from gzip import GzipFile
import six
import re
# - Python>=3.5 GzipFile's read() has issues returning leftover
# uncompressed data when input is corrupted
# (regression or bug-fix compared to Python 3.4)
# - read1(), which fetches data before raising EOFError on next call
# works here but is only available from Python>=3.3
# - scrapy does not support Python 3.2
# - Python 2.7 GzipFile works fine with standard read() + extrabuf
if six.PY2:
def read1(gzf, size=-1):
return gzf.read(size)
else:
def read1(gzf, size=-1):
return gzf.read1(size)
def gunzip(data):
"""Gunzip the given data and return as much data as possible.
This is resilient to CRC checksum errors.
"""
f = GzipFile(fileobj=BytesIO(data))
output = b''
chunk = b'.'
while chunk:
try:
chunk = read1(f, 8196)
output += chunk
except (IOError, EOFError, struct.error):
# complete only if there is some data, otherwise re-raise
# see issue 87 about catching struct.error
# some pages are quite small so output is '' and f.extrabuf
# contains the whole page content
if output or getattr(f, 'extrabuf', None):
try:
output += f.extrabuf
finally:
break
else:
raise
return output
_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
cenc = response.headers.get('Content-Encoding', b'').lower()
return (_is_gzipped(ctype) or
(_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-11-fixed/scrapy/scrapy/utils/gz.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-11-buggy/scrapy/scrapy/utils/gz.py |
scrapy-bug-6 | """
Images Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from PIL import Image
from scrapy.utils.misc import md5sum
from scrapy.utils.python import to_bytes
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
#TODO: from scrapy.pipelines.media import MediaPipeline
from scrapy.pipelines.files import FileException, FilesPipeline
class NoimagesDrop(DropItem):
"""Product with no images exception"""
class ImageException(FileException):
"""General image error exception"""
class ImagesPipeline(FilesPipeline):
"""Abstract pipeline that implement the image thumbnail generation logic
"""
MEDIA_NAME = 'image'
# Uppercase attributes kept for backward compatibility with code that subclasses
# ImagesPipeline. They may be overridden by settings.
MIN_WIDTH = 0
MIN_HEIGHT = 0
EXPIRES = 90
THUMBS = {}
DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
DEFAULT_IMAGES_RESULT_FIELD = 'images'
def __init__(self, store_uri, download_func=None, settings=None):
super(ImagesPipeline, self).__init__(store_uri, settings=settings,
download_func=download_func)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
resolve = functools.partial(self._key_for_pipe,
base_class_name="ImagesPipeline",
settings=settings)
self.expires = settings.getint(
resolve("IMAGES_EXPIRES"), self.EXPIRES
)
if not hasattr(self, "IMAGES_RESULT_FIELD"):
self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
if not hasattr(self, "IMAGES_URLS_FIELD"):
self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
self.images_urls_field = settings.get(
resolve('IMAGES_URLS_FIELD'),
self.IMAGES_URLS_FIELD
)
self.images_result_field = settings.get(
resolve('IMAGES_RESULT_FIELD'),
self.IMAGES_RESULT_FIELD
)
self.min_width = settings.getint(
resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
)
self.min_height = settings.getint(
resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
)
self.thumbs = settings.get(
resolve('IMAGES_THUMBS'), self.THUMBS
)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
store_uri = settings['IMAGES_STORE']
return cls(store_uri, settings=settings)
def file_downloaded(self, response, request, info):
return self.image_downloaded(response, request, info)
def image_downloaded(self, response, request, info):
checksum = None
for path, image, buf in self.get_images(response, request, info):
if checksum is None:
buf.seek(0)
checksum = md5sum(buf)
width, height = image.size
self.store.persist_file(
path, buf, info,
meta={'width': width, 'height': height},
headers={'Content-Type': 'image/jpeg'})
return checksum
def get_images(self, response, request, info):
path = self.file_path(request, response=response, info=info)
orig_image = Image.open(BytesIO(response.body))
width, height = orig_image.size
if width < self.min_width or height < self.min_height:
raise ImageException("Image too small (%dx%d < %dx%d)" %
(width, height, self.min_width, self.min_height))
image, buf = self.convert_image(orig_image)
yield path, image, buf
for thumb_id, size in six.iteritems(self.thumbs):
thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)
thumb_image, thumb_buf = self.convert_image(image, size)
yield thumb_path, thumb_image, thumb_buf
def convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
background = Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
image = image.convert('RGB')
if size:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
buf = BytesIO()
image.save(buf, 'JPEG')
return image, buf
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.images_urls_field, [])]
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.images_result_field in item.fields:
item[self.images_result_field] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
'please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from image_key or file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() or image_key() methods have been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
## end of deprecation warning block
image_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'full/%s.jpg' % (image_guid)
def thumb_path(self, request, thumb_id, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.thumb_key(url) method is deprecated, please use '
'thumb_path(request, thumb_id, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from thumb_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if thumb_key() method has been overridden
if not hasattr(self.thumb_key, '_base'):
_warn()
return self.thumb_key(url, thumb_id)
## end of deprecation warning block
thumb_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'thumbs/%s/%s.jpg' % (thumb_id, thumb_guid)
# deprecated
def file_key(self, url):
return self.image_key(url)
file_key._base = True
# deprecated
def image_key(self, url):
return self.file_path(url)
image_key._base = True
# deprecated
def thumb_key(self, url, thumb_id):
return self.thumb_path(url, thumb_id)
thumb_key._base = True
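# Sketch of the storage layout and thumbnailing above (illustrative,
# module-level): originals go to full/<sha1(url)>.jpg, thumbnails to
# thumbs/<thumb_id>/<sha1(url)>.jpg, shrunk in place by PIL.
_guid = hashlib.sha1(to_bytes('http://example.com/a.png')).hexdigest()
assert ('full/%s.jpg' % _guid).startswith('full/')
_img = Image.new('RGB', (100, 80))
_thumb = _img.copy()
_thumb.thumbnail((50, 50), Image.ANTIALIAS)
assert _thumb.size == (50, 40)  # aspect ratio preserved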
"""
Images Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from PIL import Image
from scrapy.utils.misc import md5sum
from scrapy.utils.python import to_bytes
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
#TODO: from scrapy.pipelines.media import MediaPipeline
from scrapy.pipelines.files import FileException, FilesPipeline
class NoimagesDrop(DropItem):
"""Product with no images exception"""
class ImageException(FileException):
"""General image error exception"""
class ImagesPipeline(FilesPipeline):
"""Abstract pipeline that implement the image thumbnail generation logic
"""
MEDIA_NAME = 'image'
# Uppercase attributes kept for backward compatibility with code that subclasses
# ImagesPipeline. They may be overridden by settings.
MIN_WIDTH = 0
MIN_HEIGHT = 0
EXPIRES = 90
THUMBS = {}
DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
DEFAULT_IMAGES_RESULT_FIELD = 'images'
def __init__(self, store_uri, download_func=None, settings=None):
super(ImagesPipeline, self).__init__(store_uri, settings=settings,
download_func=download_func)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
resolve = functools.partial(self._key_for_pipe,
base_class_name="ImagesPipeline",
settings=settings)
self.expires = settings.getint(
resolve("IMAGES_EXPIRES"), self.EXPIRES
)
if not hasattr(self, "IMAGES_RESULT_FIELD"):
self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
if not hasattr(self, "IMAGES_URLS_FIELD"):
self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
self.images_urls_field = settings.get(
resolve('IMAGES_URLS_FIELD'),
self.IMAGES_URLS_FIELD
)
self.images_result_field = settings.get(
resolve('IMAGES_RESULT_FIELD'),
self.IMAGES_RESULT_FIELD
)
self.min_width = settings.getint(
resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
)
self.min_height = settings.getint(
resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
)
self.thumbs = settings.get(
resolve('IMAGES_THUMBS'), self.THUMBS
)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
store_uri = settings['IMAGES_STORE']
return cls(store_uri, settings=settings)
def file_downloaded(self, response, request, info):
return self.image_downloaded(response, request, info)
def image_downloaded(self, response, request, info):
checksum = None
for path, image, buf in self.get_images(response, request, info):
if checksum is None:
buf.seek(0)
checksum = md5sum(buf)
width, height = image.size
self.store.persist_file(
path, buf, info,
meta={'width': width, 'height': height},
headers={'Content-Type': 'image/jpeg'})
return checksum
def get_images(self, response, request, info):
path = self.file_path(request, response=response, info=info)
orig_image = Image.open(BytesIO(response.body))
width, height = orig_image.size
if width < self.min_width or height < self.min_height:
raise ImageException("Image too small (%dx%d < %dx%d)" %
(width, height, self.min_width, self.min_height))
image, buf = self.convert_image(orig_image)
yield path, image, buf
for thumb_id, size in six.iteritems(self.thumbs):
thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)
thumb_image, thumb_buf = self.convert_image(image, size)
yield thumb_path, thumb_image, thumb_buf
def convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
background = Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
image = image.convert('RGB')
if size:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
buf = BytesIO()
image.save(buf, 'JPEG')
return image, buf
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.images_urls_field, [])]
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.images_result_field in item.fields:
item[self.images_result_field] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
'please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from image_key or file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() or image_key() methods have been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
## end of deprecation warning block
image_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'full/%s.jpg' % (image_guid)
def thumb_path(self, request, thumb_id, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.thumb_key(url) method is deprecated, please use '
'thumb_path(request, thumb_id, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from thumb_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if thumb_key() method has been overridden
if not hasattr(self.thumb_key, '_base'):
_warn()
return self.thumb_key(url, thumb_id)
## end of deprecation warning block
thumb_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'thumbs/%s/%s.jpg' % (thumb_id, thumb_guid)
# deprecated
def file_key(self, url):
return self.image_key(url)
file_key._base = True
# deprecated
def image_key(self, url):
return self.file_path(url)
image_key._base = True
# deprecated
def thumb_key(self, url, thumb_id):
return self.thumb_path(url, thumb_id)
thumb_key._base = True
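# A minimal configuration sketch for enabling this pipeline (hypothetical
# project settings; the values below are illustrative, not defaults):
#
#   ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
#   IMAGES_STORE = '/path/to/images'
#   IMAGES_THUMBS = {'small': (50, 50), 'big': (270, 270)}
#   IMAGES_MIN_WIDTH = 110
#   IMAGES_MIN_HEIGHT = 110
#
# With IMAGES_THUMBS set, get_images() yields one 'full/<sha1>.jpg' entry plus
# one 'thumbs/<thumb_id>/<sha1>.jpg' entry per configured thumbnail size.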
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-6-fixed/scrapy/scrapy/pipelines/images.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-6-buggy/scrapy/scrapy/pipelines/images.py |
scrapy-bug-16 | """
This module contains general purpose URL functions not found in the standard
library.
Some of the functions that used to be imported from this module have been moved
to the w3lib.url module. Always import those from there instead.
"""
import posixpath
import re
from six.moves.urllib.parse import (ParseResult, urlunparse, urldefrag,
urlparse, parse_qsl, urlencode,
unquote)
# scrapy.utils.url was moved to w3lib.url and import * ensures this
# move doesn't break old code
from w3lib.url import *
from w3lib.url import _safe_chars
from scrapy.utils.python import to_native_str
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if not host:
return False
domains = [d.lower() for d in domains]
return any((host == d) or (host.endswith('.%s' % d)) for d in domains)
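# A quick doctest-style sketch of the matching rule above (subdomains match,
# unrelated hosts do not):
#
#   >>> url_is_from_any_domain('http://sub.example.com/page', ['example.com'])
#   True
#   >>> url_is_from_any_domain('http://example.org/page', ['example.com'])
#   False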
def url_is_from_spider(url, spider):
"""Return True if the url belongs to the given spider"""
return url_is_from_any_domain(url,
[spider.name] + list(getattr(spider, 'allowed_domains', [])))
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
encoding=None):
"""Canonicalize the given url by applying the following procedures:
- sort query arguments, first by key, then by value
- percent encode paths and query arguments. non-ASCII characters are
percent-encoded using UTF-8 (RFC-3986)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
- normalize percent encodings case (%2f -> %2F)
- remove query arguments with blank values (unless keep_blank_values is True)
- remove fragments (unless keep_fragments is True)
The url passed can be a str or unicode, while the url returned is always a
str.
For examples see the tests in tests/test_utils_url.py
"""
scheme, netloc, path, params, query, fragment = parse_url(url)
keyvals = parse_qsl(query, keep_blank_values)
keyvals.sort()
query = urlencode(keyvals)
# XXX: copied from w3lib.url.safe_url_string to add encoding argument
# path = to_native_str(path, encoding)
# path = moves.urllib.parse.quote(path, _safe_chars, encoding='latin1') or '/'
path = safe_url_string(_unquotepath(path)) or '/'
fragment = '' if not keep_fragments else fragment
return urlunparse((scheme, netloc.lower(), path, params, query, fragment))
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
return unquote(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
if isinstance(url, ParseResult):
return url
return urlparse(to_native_str(url, encoding))
def escape_ajax(url):
"""
    Return the crawlable url according to:
http://code.google.com/web/ajaxcrawling/docs/getting-started.html
>>> escape_ajax("www.example.com/ajax.html#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?k1=v1&k2=v2#!key=value")
'www.example.com/ajax.html?k1=v1&k2=v2&_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html#!")
'www.example.com/ajax.html?_escaped_fragment_='
    URLs that are not "AJAX crawlable" (according to Google) are returned as-is:
>>> escape_ajax("www.example.com/ajax.html#key=value")
'www.example.com/ajax.html#key=value'
>>> escape_ajax("www.example.com/ajax.html#")
'www.example.com/ajax.html#'
>>> escape_ajax("www.example.com/ajax.html")
'www.example.com/ajax.html'
"""
defrag, frag = urldefrag(url)
if not frag.startswith('!'):
return url
return add_or_replace_parameter(defrag, '_escaped_fragment_', frag[1:])
def add_http_if_no_scheme(url):
"""Add http as the default scheme if it is missing from the url."""
match = re.match(r"^\w+://", url, flags=re.I)
if not match:
parts = urlparse(url)
scheme = "http:" if parts.netloc else "http://"
url = scheme + url
return url
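# A quick sketch of the two branches above: scheme-relative URLs (those with a
# netloc) only need 'http:', while bare hosts need 'http://':
#
#   >>> add_http_if_no_scheme('//www.example.com/index.html')
#   'http://www.example.com/index.html'
#   >>> add_http_if_no_scheme('www.example.com')
#   'http://www.example.com'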
def guess_scheme(url):
"""Add an URL scheme if missing: file:// for filepath-like input or http:// otherwise."""
parts = urlparse(url)
if parts.scheme:
return url
# Note: this does not match Windows filepath
if re.match(r'''^ # start with...
(
\. # ...a single dot,
(
\. | [^/\.]+ # optionally followed by
)? # either a second dot or some characters
)? # optional match of ".", ".." or ".blabla"
/ # at least one "/" for a file path,
. # and something after the "/"
''', parts.path, flags=re.VERBOSE):
return any_to_uri(url)
else:
return add_http_if_no_scheme(url)
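# A quick sketch of guess_scheme() (the file branch relies on w3lib's
# any_to_uri, so the exact file:// form depends on the working directory):
#
#   >>> guess_scheme('example.com')
#   'http://example.com'
#   >>> guess_scheme('./index.html')    # filepath-like input -> file:// URI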
"""
This module contains general purpose URL functions not found in the standard
library.
Some of the functions that used to be imported from this module have been moved
to the w3lib.url module. Always import those from there instead.
"""
import posixpath
import re
import six
from six.moves.urllib.parse import (ParseResult, urlunparse, urldefrag,
urlparse, parse_qsl, urlencode,
quote, unquote)
if six.PY3:
from urllib.parse import unquote_to_bytes
# scrapy.utils.url was moved to w3lib.url and import * ensures this
# move doesn't break old code
from w3lib.url import *
from w3lib.url import _safe_chars
from scrapy.utils.python import to_bytes, to_native_str, to_unicode
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if not host:
return False
domains = [d.lower() for d in domains]
return any((host == d) or (host.endswith('.%s' % d)) for d in domains)
def url_is_from_spider(url, spider):
"""Return True if the url belongs to the given spider"""
return url_is_from_any_domain(url,
[spider.name] + list(getattr(spider, 'allowed_domains', [])))
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
return (
to_native_str(parts.scheme),
to_native_str(parts.netloc.encode('idna')),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
quote(to_bytes(parts.params, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars)
)
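# A quick sketch of the helper above: the netloc goes through IDNA while the
# path is percent-encoded as UTF-8 (index 2 of the result tuple is the path):
#
#   >>> from six.moves.urllib.parse import urlparse
#   >>> _safe_ParseResult(urlparse(u'http://www.example.com/r\u00e9sum\u00e9'))[2]
#   '/r%C3%A9sum%C3%A9'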
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
encoding=None):
"""Canonicalize the given url by applying the following procedures:
- sort query arguments, first by key, then by value
- percent encode paths ; non-ASCII characters are percent-encoded
using UTF-8 (RFC-3986)
- percent encode query arguments ; non-ASCII characters are percent-encoded
using passed `encoding` (UTF-8 by default)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
- normalize percent encodings case (%2f -> %2F)
- remove query arguments with blank values (unless `keep_blank_values` is True)
- remove fragments (unless `keep_fragments` is True)
The url passed can be bytes or unicode, while the url returned is
always a native str (bytes in Python 2, unicode in Python 3).
For examples see the tests in tests/test_utils_url.py
"""
# If supplied `encoding` is not compatible with all characters in `url`,
# fallback to UTF-8 as safety net.
# UTF-8 can handle all Unicode characters,
# so we should be covered regarding URL normalization,
# if not for proper URL expected by remote website.
try:
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding=encoding)
except UnicodeError as e:
if encoding != 'utf8':
scheme, netloc, path, params, query, fragment = _safe_ParseResult(
parse_url(url), encoding='utf8')
else:
raise
# 1. decode query-string as UTF-8 (or keep raw bytes),
# sort values,
# and percent-encode them back
if not six.PY2:
# Python3's urllib.parse.parse_qsl does not work as wanted
        # for percent-encoded characters that do not match the passed encoding:
# they get lost.
#
# e.g., 'q=b%a3' becomes [('q', 'b\ufffd')]
# (ie. with 'REPLACEMENT CHARACTER' (U+FFFD),
# instead of \xa3 that you get with Python2's parse_qsl)
#
# what we want here is to keep raw bytes, and percent encode them
        # so as to preserve whatever encoding was originally used.
#
# See https://tools.ietf.org/html/rfc3987#section-6.4:
#
# For example, it is possible to have a URI reference of
# "http://www.example.org/r%E9sum%E9.xml#r%C3%A9sum%C3%A9", where the
# document name is encoded in iso-8859-1 based on server settings, but
# where the fragment identifier is encoded in UTF-8 according to
# [XPointer]. The IRI corresponding to the above URI would be (in XML
# notation)
# "http://www.example.org/r%E9sum%E9.xml#résumé".
# Similar considerations apply to query parts. The functionality of
# IRIs (namely, to be able to include non-ASCII characters) can only be
# used if the query part is encoded in UTF-8.
keyvals = parse_qsl_to_bytes(query, keep_blank_values)
else:
keyvals = parse_qsl(query, keep_blank_values)
keyvals.sort()
query = urlencode(keyvals)
# 2. decode percent-encoded sequences in path as UTF-8 (or keep raw bytes)
# and percent-encode path again (this normalizes to upper-case %XX)
uqp = _unquotepath(path)
path = quote(uqp, _safe_chars) or '/'
fragment = '' if not keep_fragments else fragment
# every part should be safe already
return urlunparse((scheme, netloc.lower(), path, params, query, fragment))
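# A quick sketch of the normalization above (query keys sorted first by key,
# then by value):
#
#   >>> canonicalize_url('http://www.example.com/do?c=3&b=5&b=2&a=50')
#   'http://www.example.com/do?a=50&b=2&b=5&c=3'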
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
if six.PY3:
        # standard lib's unquote() does not work in Python 3
        # for non-UTF-8 percent-escaped characters: they get lost.
# e.g., '%a3' becomes 'REPLACEMENT CHARACTER' (U+FFFD)
#
# unquote_to_bytes() returns raw bytes instead
return unquote_to_bytes(path)
else:
# in Python 2, '%a3' becomes '\xa3', which is what we want
return unquote(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
if isinstance(url, ParseResult):
return url
return urlparse(to_unicode(url, encoding))
if six.PY3:
from urllib.parse import _coerce_args, unquote_to_bytes
def parse_qsl_to_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parse a query given as a string argument.
Data are returned as a list of name, value pairs as bytes.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
"""
# This code is the same as Python3's parse_qsl()
# (at https://hg.python.org/cpython/rev/c38ac7ab8d9a)
# except for the unquote(s, encoding, errors) calls replaced
# with unquote_to_bytes(s)
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote_to_bytes(name)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote_to_bytes(value)
value = _coerce_result(value)
r.append((name, value))
return r
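    # A quick sketch of why this replacement exists: raw bytes survive even
    # when they are not valid UTF-8 (compare the U+FFFD note above):
    #
    #   >>> parse_qsl_to_bytes('q=b%a3')
    #   [(b'q', b'b\xa3')]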
def escape_ajax(url):
"""
    Return the crawlable url according to:
http://code.google.com/web/ajaxcrawling/docs/getting-started.html
>>> escape_ajax("www.example.com/ajax.html#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?k1=v1&k2=v2#!key=value")
'www.example.com/ajax.html?k1=v1&k2=v2&_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html#!")
'www.example.com/ajax.html?_escaped_fragment_='
    URLs that are not "AJAX crawlable" (according to Google) are returned as-is:
>>> escape_ajax("www.example.com/ajax.html#key=value")
'www.example.com/ajax.html#key=value'
>>> escape_ajax("www.example.com/ajax.html#")
'www.example.com/ajax.html#'
>>> escape_ajax("www.example.com/ajax.html")
'www.example.com/ajax.html'
"""
defrag, frag = urldefrag(url)
if not frag.startswith('!'):
return url
return add_or_replace_parameter(defrag, '_escaped_fragment_', frag[1:])
def add_http_if_no_scheme(url):
"""Add http as the default scheme if it is missing from the url."""
match = re.match(r"^\w+://", url, flags=re.I)
if not match:
parts = urlparse(url)
scheme = "http:" if parts.netloc else "http://"
url = scheme + url
return url
def guess_scheme(url):
"""Add an URL scheme if missing: file:// for filepath-like input or http:// otherwise."""
parts = urlparse(url)
if parts.scheme:
return url
# Note: this does not match Windows filepath
if re.match(r'''^ # start with...
(
\. # ...a single dot,
(
\. | [^/\.]+ # optionally followed by
)? # either a second dot or some characters
)? # optional match of ".", ".." or ".blabla"
/ # at least one "/" for a file path,
. # and something after the "/"
''', parts.path, flags=re.VERBOSE):
return any_to_uri(url)
else:
return add_http_if_no_scheme(url)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-16-fixed/scrapy/scrapy/utils/url.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-16-buggy/scrapy/scrapy/utils/url.py |
scrapy-bug-39 | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
class Spider(object_ref):
"""Base class for scrapy spiders. All spiders must inherit from this
class.
"""
name = None
custom_settings = None
def __init__(self, name=None, **kwargs):
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise ValueError("%s must have a name" % type(self).__name__)
self.__dict__.update(kwargs)
if not hasattr(self, 'start_urls'):
self.start_urls = []
@property
def logger(self):
logger = logging.getLogger(self.name)
return logging.LoggerAdapter(logger, {'spider': self})
def log(self, message, level=logging.DEBUG, **kw):
"""Log the given message at the given log level
This helper wraps a log call to the logger within the spider, but you
can use it directly (e.g. Spider.logger.info('msg')) or use any other
Python logger too.
"""
self.logger.log(level, message, **kw)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(*args, **kwargs)
spider._set_crawler(crawler)
return spider
def set_crawler(self, crawler):
warnings.warn("set_crawler is deprecated, instantiate and bound the "
"spider to this crawler with from_crawler method "
"instead.",
category=ScrapyDeprecationWarning, stacklevel=2)
        assert not hasattr(self, 'crawler'), "Spider already bound to a " \
"crawler"
self._set_crawler(crawler)
def _set_crawler(self, crawler):
self.crawler = crawler
self.settings = crawler.settings
crawler.signals.connect(self.close, signals.spider_closed)
def start_requests(self):
if self.make_requests_from_url is not Spider.make_requests_from_url:
warnings.warn(
"Spider.make_requests_from_url method is deprecated; "
"it won't be called in future Scrapy releases. "
"Please override start_requests method instead."
)
for url in self.start_urls:
yield self.make_requests_from_url(url)
else:
for url in self.start_urls:
yield Request(url, dont_filter=True)
def make_requests_from_url(self, url):
""" This method is deprecated. """
return Request(url, dont_filter=True)
def parse(self, response):
raise NotImplementedError
@classmethod
def update_settings(cls, settings):
settings.setdict(cls.custom_settings or {}, priority='spider')
@classmethod
def handles_request(cls, request):
return url_is_from_spider(request.url, cls)
@staticmethod
def close(spider, reason):
closed = getattr(spider, 'closed', None)
if callable(closed):
return closed(reason)
def __str__(self):
return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))
__repr__ = __str__
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
def __init__(self, message):
self.message = message
def __getattr__(self, name):
raise AttributeError(self.message)
spiders = ObsoleteClass(
'"from scrapy.spider import spiders" no longer works - use '
'"from scrapy.spiderloader import SpiderLoader" and instantiate '
'it with your project settings"'
)
# Top-level imports
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
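# A minimal subclass sketch (hypothetical spider, for illustration only):
#
#   class ExampleSpider(Spider):
#       name = 'example'
#       start_urls = ['http://www.example.com/']
#
#       def parse(self, response):
#           self.logger.info('visited %s', response.url)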
"""
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
class Spider(object_ref):
"""Base class for scrapy spiders. All spiders must inherit from this
class.
"""
name = None
custom_settings = None
def __init__(self, name=None, **kwargs):
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise ValueError("%s must have a name" % type(self).__name__)
self.__dict__.update(kwargs)
if not hasattr(self, 'start_urls'):
self.start_urls = []
@property
def logger(self):
logger = logging.getLogger(self.name)
return logging.LoggerAdapter(logger, {'spider': self})
def log(self, message, level=logging.DEBUG, **kw):
"""Log the given message at the given log level
This helper wraps a log call to the logger within the spider, but you
can use it directly (e.g. Spider.logger.info('msg')) or use any other
Python logger too.
"""
self.logger.log(level, message, **kw)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(*args, **kwargs)
spider._set_crawler(crawler)
return spider
def set_crawler(self, crawler):
warnings.warn("set_crawler is deprecated, instantiate and bound the "
"spider to this crawler with from_crawler method "
"instead.",
category=ScrapyDeprecationWarning, stacklevel=2)
        assert not hasattr(self, 'crawler'), "Spider already bound to a " \
"crawler"
self._set_crawler(crawler)
def _set_crawler(self, crawler):
self.crawler = crawler
self.settings = crawler.settings
crawler.signals.connect(self.close, signals.spider_closed)
def start_requests(self):
cls = self.__class__
if cls.make_requests_from_url is not Spider.make_requests_from_url:
warnings.warn(
"Spider.make_requests_from_url method is deprecated; it "
"won't be called in future Scrapy releases. Please "
"override Spider.start_requests method instead (see %s.%s)." % (
cls.__module__, cls.__name__
),
)
for url in self.start_urls:
yield self.make_requests_from_url(url)
else:
for url in self.start_urls:
yield Request(url, dont_filter=True)
def make_requests_from_url(self, url):
""" This method is deprecated. """
return Request(url, dont_filter=True)
def parse(self, response):
raise NotImplementedError
@classmethod
def update_settings(cls, settings):
settings.setdict(cls.custom_settings or {}, priority='spider')
@classmethod
def handles_request(cls, request):
return url_is_from_spider(request.url, cls)
@staticmethod
def close(spider, reason):
closed = getattr(spider, 'closed', None)
if callable(closed):
return closed(reason)
def __str__(self):
return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))
__repr__ = __str__
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
def __init__(self, message):
self.message = message
def __getattr__(self, name):
raise AttributeError(self.message)
spiders = ObsoleteClass(
'"from scrapy.spider import spiders" no longer works - use '
'"from scrapy.spiderloader import SpiderLoader" and instantiate '
'it with your project settings"'
)
# Top-level imports
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-39-fixed/scrapy/scrapy/spiders/__init__.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-39-buggy/scrapy/scrapy/spiders/__init__.py |
scrapy-bug-14 | import struct
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from gzip import GzipFile
import six
# - Python>=3.5 GzipFile's read() has issues returning leftover
# uncompressed data when input is corrupted
# (regression or bug-fix compared to Python 3.4)
# - read1(), which fetches data before raising EOFError on next call
# works here but is only available from Python>=3.3
# - scrapy does not support Python 3.2
# - Python 2.7 GzipFile works fine with standard read() + extrabuf
if six.PY2:
def read1(gzf, size=-1):
return gzf.read(size)
else:
def read1(gzf, size=-1):
return gzf.read1(size)
def gunzip(data):
"""Gunzip the given data and return as much data as possible.
This is resilient to CRC checksum errors.
"""
f = GzipFile(fileobj=BytesIO(data))
output = b''
chunk = b'.'
while chunk:
try:
chunk = read1(f, 8196)
output += chunk
except (IOError, EOFError, struct.error):
# complete only if there is some data, otherwise re-raise
# see issue 87 about catching struct.error
# some pages are quite small so output is '' and f.extrabuf
# contains the whole page content
if output or getattr(f, 'extrabuf', None):
try:
output += f.extrabuf
finally:
break
else:
raise
return output
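# A quick usage sketch (Python 3, where gzip.compress is available in the
# standard library):
#
#   >>> import gzip
#   >>> gunzip(gzip.compress(b'hello'))
#   b'hello'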
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
return ctype in (b'application/x-gzip', b'application/gzip')
import struct
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from gzip import GzipFile
import six
# - Python>=3.5 GzipFile's read() has issues returning leftover
# uncompressed data when input is corrupted
# (regression or bug-fix compared to Python 3.4)
# - read1(), which fetches data before raising EOFError on next call
# works here but is only available from Python>=3.3
# - scrapy does not support Python 3.2
# - Python 2.7 GzipFile works fine with standard read() + extrabuf
if six.PY2:
def read1(gzf, size=-1):
return gzf.read(size)
else:
def read1(gzf, size=-1):
return gzf.read1(size)
def gunzip(data):
"""Gunzip the given data and return as much data as possible.
This is resilient to CRC checksum errors.
"""
f = GzipFile(fileobj=BytesIO(data))
output = b''
chunk = b'.'
while chunk:
try:
chunk = read1(f, 8196)
output += chunk
except (IOError, EOFError, struct.error):
# complete only if there is some data, otherwise re-raise
# see issue 87 about catching struct.error
# some pages are quite small so output is '' and f.extrabuf
# contains the whole page content
if output or getattr(f, 'extrabuf', None):
try:
output += f.extrabuf
finally:
break
else:
raise
return output
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
return ctype in (b'application/x-gzip', b'application/gzip')
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-14-fixed/scrapy/scrapy/utils/gz.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-14-buggy/scrapy/scrapy/utils/gz.py |
scrapy-bug-9 | """
Mail sending helpers
See documentation in docs/topics/email.rst
"""
import logging
from six.moves import cStringIO as StringIO
import six
from email.utils import COMMASPACE, formatdate
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_base import MIMEBase
if six.PY2:
from email.MIMENonMultipart import MIMENonMultipart
from email import Encoders
else:
from email.mime.nonmultipart import MIMENonMultipart
from email import encoders as Encoders
from twisted.internet import defer, reactor, ssl
logger = logging.getLogger(__name__)
class MailSender(object):
def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',
smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):
self.smtphost = smtphost
self.smtpport = smtpport
self.smtpuser = smtpuser
self.smtppass = smtppass
self.smtptls = smtptls
self.smtpssl = smtpssl
self.mailfrom = mailfrom
self.debug = debug
@classmethod
def from_settings(cls, settings):
return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],
settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
if attachs:
msg = MIMEMultipart()
else:
msg = MIMENonMultipart(*mimetype.split('/', 1))
msg['From'] = self.mailfrom
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
rcpts = to[:]
if cc:
rcpts.extend(cc)
msg['Cc'] = COMMASPACE.join(cc)
if charset:
msg.set_charset(charset)
if attachs:
msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
for attach_name, mimetype, f in attachs:
part = MIMEBase(*mimetype.split('/'))
part.set_payload(f.read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' \
% attach_name)
msg.attach(part)
else:
msg.set_payload(body)
if _callback:
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
if self.debug:
logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': len(attachs)})
return
dfd = self._sendmail(rcpts, msg.as_string())
dfd.addCallbacks(self._sent_ok, self._sent_failed,
callbackArgs=[to, cc, subject, len(attachs)],
errbackArgs=[to, cc, subject, len(attachs)])
reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
return dfd
def _sent_ok(self, result, to, cc, subject, nattachs):
logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs})
def _sent_failed(self, failure, to, cc, subject, nattachs):
errstr = str(failure.value)
logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
                     'Subject="%(mailsubject)s" Attachs=%(mailattachs)d '
'- %(mailerr)s',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs, 'mailerr': errstr})
def _sendmail(self, to_addrs, msg):
# Import twisted.mail here because it is not available in python3
from twisted.mail.smtp import ESMTPSenderFactory
msg = StringIO(msg)
d = defer.Deferred()
factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \
to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \
requireTransportSecurity=self.smtptls)
factory.noisy = False
if self.smtpssl:
reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
else:
reactor.connectTCP(self.smtphost, self.smtpport, factory)
return d
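# A minimal usage sketch (hypothetical host and addresses; actual delivery
# requires a running Twisted reactor):
#
#   mailer = MailSender(smtphost='smtp.example.com', mailfrom='bot@example.com')
#   mailer.send(to=['someone@example.com'], subject='Some subject',
#               body='Some body', cc=['another@example.com'])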
"""
Mail sending helpers
See documentation in docs/topics/email.rst
"""
import logging
from six.moves import cStringIO as StringIO
import six
from email.utils import COMMASPACE, formatdate
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_base import MIMEBase
if six.PY2:
from email.MIMENonMultipart import MIMENonMultipart
from email import Encoders
else:
from email.mime.nonmultipart import MIMENonMultipart
from email import encoders as Encoders
from twisted.internet import defer, reactor, ssl
logger = logging.getLogger(__name__)
class MailSender(object):
def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',
smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):
self.smtphost = smtphost
self.smtpport = smtpport
self.smtpuser = smtpuser
self.smtppass = smtppass
self.smtptls = smtptls
self.smtpssl = smtpssl
self.mailfrom = mailfrom
self.debug = debug
@classmethod
def from_settings(cls, settings):
return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],
settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
if attachs:
msg = MIMEMultipart()
else:
msg = MIMENonMultipart(*mimetype.split('/', 1))
msg['From'] = self.mailfrom
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
rcpts = to[:]
if cc:
rcpts.extend(cc)
msg['Cc'] = COMMASPACE.join(cc)
if charset:
msg.set_charset(charset)
if attachs:
msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
for attach_name, mimetype, f in attachs:
part = MIMEBase(*mimetype.split('/'))
part.set_payload(f.read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' \
% attach_name)
msg.attach(part)
else:
msg.set_payload(body)
if _callback:
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
if self.debug:
logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': len(attachs)})
return
dfd = self._sendmail(rcpts, msg.as_string())
dfd.addCallbacks(self._sent_ok, self._sent_failed,
callbackArgs=[to, cc, subject, len(attachs)],
errbackArgs=[to, cc, subject, len(attachs)])
reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
return dfd
def _sent_ok(self, result, to, cc, subject, nattachs):
logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs})
def _sent_failed(self, failure, to, cc, subject, nattachs):
errstr = str(failure.value)
logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
                     'Subject="%(mailsubject)s" Attachs=%(mailattachs)d '
'- %(mailerr)s',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs, 'mailerr': errstr})
def _sendmail(self, to_addrs, msg):
# Import twisted.mail here because it is not available in python3
from twisted.mail.smtp import ESMTPSenderFactory
msg = StringIO(msg)
d = defer.Deferred()
factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \
to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \
requireTransportSecurity=self.smtptls)
factory.noisy = False
if self.smtpssl:
reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
else:
reactor.connectTCP(self.smtphost, self.smtpport, factory)
return d
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-9-fixed/scrapy/scrapy/mail.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-9-buggy/scrapy/scrapy/mail.py |
scrapy-bug-26 | import six
import json
import copy
import warnings
from collections import MutableMapping
from importlib import import_module
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
from . import default_settings
SETTINGS_PRIORITIES = {
'default': 0,
'command': 10,
'project': 20,
'spider': 30,
'cmdline': 40,
}
def get_settings_priority(priority):
"""
Small helper function that looks up a given string priority in the
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` dictionary and returns its
numerical value, or directly returns a given numerical priority.
"""
if isinstance(priority, six.string_types):
return SETTINGS_PRIORITIES[priority]
else:
return priority
class SettingsAttribute(object):
"""Class for storing data related to settings attributes.
This class is intended for internal usage, you should try Settings class
for settings configuration, not this one.
"""
def __init__(self, value, priority):
self.value = value
if isinstance(self.value, BaseSettings):
self.priority = max(self.value.maxpriority(), priority)
else:
self.priority = priority
def set(self, value, priority):
"""Sets value if priority is higher or equal than current priority."""
if isinstance(self.value, BaseSettings):
# Ignore self.priority if self.value has per-key priorities
self.value.update(value, priority)
self.priority = max(self.value.maxpriority(), priority)
else:
if priority >= self.priority:
self.value = value
self.priority = priority
def __str__(self):
return "<SettingsAttribute value={self.value!r} " \
"priority={self.priority}>".format(self=self)
__repr__ = __str__
class BaseSettings(MutableMapping):
"""
Instances of this class behave like dictionaries, but store priorities
along with their ``(key, value)`` pairs, and can be frozen (i.e. marked
immutable).
Key-value entries can be passed on initialization with the ``values``
argument, and they would take the ``priority`` level (unless ``values`` is
already an instance of :class:`~scrapy.settings.BaseSettings`, in which
case the existing priority levels will be kept). If the ``priority``
argument is a string, the priority name will be looked up in
:attr:`~scrapy.settings.SETTINGS_PRIORITIES`. Otherwise, a specific integer
should be provided.
Once the object is created, new settings can be loaded or updated with the
:meth:`~scrapy.settings.BaseSettings.set` method, and can be accessed with
the square bracket notation of dictionaries, or with the
:meth:`~scrapy.settings.BaseSettings.get` method of the instance and its
value conversion variants. When requesting a stored key, the value with the
highest priority will be retrieved.
"""
def __init__(self, values=None, priority='project'):
self.frozen = False
self.attributes = {}
self.update(values, priority)
def __getitem__(self, opt_name):
value = None
if opt_name in self:
value = self.attributes[opt_name].value
return value
def __contains__(self, name):
return name in self.attributes
def get(self, name, default=None):
"""
Get a setting value without affecting its original type.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return self[name] if self[name] is not None else default
def getbool(self, name, default=False):
"""
Get a setting value as a boolean.
``1``, ``'1'``, and ``True`` return ``True``, while ``0``, ``'0'``,
``False`` and ``None`` return ``False``.
For example, settings populated through environment variables set to
``'0'`` will return ``False`` when using this method.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return bool(int(self.get(name, default)))
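    # A quick sketch of the conversion above (values populated from
    # environment variables are strings, so '0' is correctly falsy here):
    #
    #   >>> BaseSettings({'SOME_FLAG': '0'}).getbool('SOME_FLAG')
    #   False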
def getint(self, name, default=0):
"""
Get a setting value as an int.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return int(self.get(name, default))
def getfloat(self, name, default=0.0):
"""
Get a setting value as a float.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return float(self.get(name, default))
def getlist(self, name, default=None):
"""
        Get a setting value as a list. If the setting's original type is a list, a
copy of it will be returned. If it's a string it will be split by ",".
For example, settings populated through environment variables set to
``'one,two'`` will return a list ['one', 'two'] when using this method.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
value = self.get(name, default or [])
if isinstance(value, six.string_types):
value = value.split(',')
return list(value)
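    # A quick sketch of the string-splitting behaviour described above:
    #
    #   >>> BaseSettings({'HOSTS': 'one.example.com,two.example.com'}).getlist('HOSTS')
    #   ['one.example.com', 'two.example.com']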
def getdict(self, name, default=None):
"""
        Get a setting value as a dictionary. If the setting's original type is a
dictionary, a copy of it will be returned. If it is a string it will be
evaluated as a JSON dictionary. In the case that it is a
:class:`~scrapy.settings.BaseSettings` instance itself, it will be
converted to a dictionary, containing all its current settings values
as they would be returned by :meth:`~scrapy.settings.BaseSettings.get`,
and losing all information about priority and mutability.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
value = self.get(name, default or {})
if isinstance(value, six.string_types):
value = json.loads(value)
return dict(value)
def _getcomposite(self, name):
# DO NOT USE THIS FUNCTION IN YOUR CUSTOM PROJECTS
# It's for internal use in the transition away from the _BASE settings
# and will be removed along with _BASE support in a future release
basename = name + "_BASE"
if basename in self:
warnings.warn('_BASE settings are deprecated.',
category=ScrapyDeprecationWarning)
compsett = BaseSettings(self[name + "_BASE"], priority='default')
compsett.update(self[name])
return compsett
else:
return self[name]
def getpriority(self, name):
"""
Return the current numerical priority value of a setting, or ``None`` if
the given ``name`` does not exist.
:param name: the setting name
:type name: string
"""
prio = None
if name in self:
prio = self.attributes[name].priority
return prio
def maxpriority(self):
"""
Return the numerical value of the highest priority present throughout
all settings, or the numerical value for ``default`` from
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` if there are no settings
stored.
"""
if len(self) > 0:
return max(self.getpriority(name) for name in self)
else:
return get_settings_priority('default')
def __setitem__(self, name, value):
self.set(name, value)
def set(self, name, value, priority='project'):
"""
Store a key/value attribute with a given priority.
Settings should be populated *before* configuring the Crawler object
(through the :meth:`~scrapy.crawler.Crawler.configure` method),
otherwise they won't have any effect.
:param name: the setting name
:type name: string
:param value: the value to associate with the setting
:type value: any
:param priority: the priority of the setting. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: string or int
"""
self._assert_mutability()
priority = get_settings_priority(priority)
if name not in self:
if isinstance(value, SettingsAttribute):
self.attributes[name] = value
else:
self.attributes[name] = SettingsAttribute(value, priority)
else:
self.attributes[name].set(value, priority)
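    # A quick sketch of how priorities gate updates (numeric values taken from
    # SETTINGS_PRIORITIES above):
    #
    #   >>> s = BaseSettings()
    #   >>> s.set('KEY', 'high', priority='cmdline')   # 40
    #   >>> s.set('KEY', 'low', priority='default')    # 0, ignored
    #   >>> s['KEY']
    #   'high'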
def setdict(self, values, priority='project'):
self.update(values, priority)
def setmodule(self, module, priority='project'):
"""
Store settings from a module with a given priority.
This is a helper function that calls
:meth:`~scrapy.settings.BaseSettings.set` for every globally declared
uppercase variable of ``module`` with the provided ``priority``.
:param module: the module or the path of the module
:type module: module object or string
:param priority: the priority of the settings. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: string or int
"""
self._assert_mutability()
if isinstance(module, six.string_types):
module = import_module(module)
for key in dir(module):
if key.isupper():
self.set(key, getattr(module, key), priority)
def update(self, values, priority='project'):
"""
Store key/value pairs with a given priority.
This is a helper function that calls
:meth:`~scrapy.settings.BaseSettings.set` for every item of ``values``
with the provided ``priority``.
If ``values`` is a string, it is assumed to be JSON-encoded and parsed
into a dict with ``json.loads()`` first. If it is a
:class:`~scrapy.settings.BaseSettings` instance, the per-key priorities
will be used and the ``priority`` parameter ignored. This allows
inserting/updating settings with different priorities with a single
command.
:param values: the settings names and values
:type values: dict or string or :class:`~scrapy.settings.BaseSettings`
:param priority: the priority of the settings. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: string or int
"""
self._assert_mutability()
if isinstance(values, six.string_types):
values = json.loads(values)
if values is not None:
if isinstance(values, BaseSettings):
for name, value in six.iteritems(values):
self.set(name, value, values.getpriority(name))
else:
for name, value in six.iteritems(values):
self.set(name, value, priority)
def delete(self, name, priority='project'):
self._assert_mutability()
priority = get_settings_priority(priority)
if priority >= self.getpriority(name):
del self.attributes[name]
def __delitem__(self, name):
self._assert_mutability()
del self.attributes[name]
def _assert_mutability(self):
if self.frozen:
raise TypeError("Trying to modify an immutable Settings object")
def copy(self):
"""
Make a deep copy of current settings.
This method returns a new instance of the :class:`Settings` class,
populated with the same values and their priorities.
Modifications to the new object won't be reflected on the original
settings.
"""
return copy.deepcopy(self)
def freeze(self):
"""
Disable further changes to the current settings.
After calling this method, the present state of the settings will become
immutable. Trying to change values through the :meth:`~set` method and
        its variants won't be possible and will raise a :exc:`TypeError`.
"""
self.frozen = True
def frozencopy(self):
"""
Return an immutable copy of the current settings.
Alias for a :meth:`~freeze` call in the object returned by :meth:`copy`.
"""
copy = self.copy()
copy.freeze()
return copy
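    # A quick sketch of immutability after freeze():
    #
    #   >>> s = BaseSettings({'KEY': 'value'})
    #   >>> s.freeze()
    #   >>> s.set('KEY', 'other')
    #   Traceback (most recent call last):
    #       ...
    #   TypeError: Trying to modify an immutable Settings object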
def __iter__(self):
return iter(self.attributes)
def __len__(self):
return len(self.attributes)
def __str__(self):
return str(self.attributes)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.attributes)
@property
def overrides(self):
warnings.warn("`Settings.overrides` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='cmdline')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._overrides
except AttributeError:
self._overrides = o = _DictProxy(self, 'cmdline')
return o
@property
def defaults(self):
warnings.warn("`Settings.defaults` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='default')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._defaults
except AttributeError:
self._defaults = o = _DictProxy(self, 'default')
return o
class _DictProxy(MutableMapping):
def __init__(self, settings, priority):
self.o = {}
self.settings = settings
self.priority = priority
def __len__(self):
return len(self.o)
def __getitem__(self, k):
return self.o[k]
def __setitem__(self, k, v):
self.settings.set(k, v, priority=self.priority)
self.o[k] = v
def __delitem__(self, k):
del self.o[k]
    def __iter__(self):
return iter(self.o)
class Settings(BaseSettings):
"""
This object stores Scrapy settings for the configuration of internal
components, and can be used for any further customization.
It is a direct subclass and supports all methods of
:class:`~scrapy.settings.BaseSettings`. Additionally, after instantiation
of this class, the new object will have the global default settings
described on :ref:`topics-settings-ref` already populated.
"""
def __init__(self, values=None, priority='project'):
# Do not pass kwarg values here. We don't want to promote user-defined
# dicts, and we want to update, not replace, default dicts with the
# values given by the user
super(Settings, self).__init__()
self.setmodule(default_settings, 'default')
# Promote default dictionaries to BaseSettings instances for per-key
# priorities
for name, val in six.iteritems(self):
if isinstance(val, dict):
self.set(name, BaseSettings(val, 'default'), 'default')
self.update(values, priority)
class CrawlerSettings(Settings):
def __init__(self, settings_module=None, **kw):
self.settings_module = settings_module
Settings.__init__(self, **kw)
def __getitem__(self, opt_name):
if opt_name in self.overrides:
return self.overrides[opt_name]
if self.settings_module and hasattr(self.settings_module, opt_name):
return getattr(self.settings_module, opt_name)
if opt_name in self.defaults:
return self.defaults[opt_name]
return Settings.__getitem__(self, opt_name)
def __str__(self):
return "<CrawlerSettings module=%r>" % self.settings_module
CrawlerSettings = create_deprecated_class(
'CrawlerSettings', CrawlerSettings,
new_class_path='scrapy.settings.Settings')
def iter_default_settings():
"""Return the default settings as an iterator of (name, value) tuples"""
for name in dir(default_settings):
if name.isupper():
yield name, getattr(default_settings, name)
def overridden_settings(settings):
"""Return a dict of the settings that have been overridden"""
for name, defvalue in iter_default_settings():
value = settings[name]
if not isinstance(defvalue, dict) and value != defvalue:
yield name, value
import six
import json
import copy
import warnings
from collections import MutableMapping
from importlib import import_module
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
from . import default_settings
SETTINGS_PRIORITIES = {
'default': 0,
'command': 10,
'project': 20,
'spider': 30,
'cmdline': 40,
}
def get_settings_priority(priority):
"""
Small helper function that looks up a given string priority in the
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` dictionary and returns its
numerical value, or directly returns a given numerical priority.
"""
if isinstance(priority, six.string_types):
return SETTINGS_PRIORITIES[priority]
else:
return priority
class SettingsAttribute(object):
"""Class for storing data related to settings attributes.
This class is intended for internal usage, you should try Settings class
for settings configuration, not this one.
"""
def __init__(self, value, priority):
self.value = value
if isinstance(self.value, BaseSettings):
self.priority = max(self.value.maxpriority(), priority)
else:
self.priority = priority
def set(self, value, priority):
"""Sets value if priority is higher or equal than current priority."""
if isinstance(self.value, BaseSettings):
# Ignore self.priority if self.value has per-key priorities
self.value.update(value, priority)
self.priority = max(self.value.maxpriority(), priority)
else:
if priority >= self.priority:
self.value = value
self.priority = priority
def __str__(self):
return "<SettingsAttribute value={self.value!r} " \
"priority={self.priority}>".format(self=self)
__repr__ = __str__
class BaseSettings(MutableMapping):
"""
Instances of this class behave like dictionaries, but store priorities
along with their ``(key, value)`` pairs, and can be frozen (i.e. marked
immutable).
Key-value entries can be passed on initialization with the ``values``
argument, and they would take the ``priority`` level (unless ``values`` is
already an instance of :class:`~scrapy.settings.BaseSettings`, in which
case the existing priority levels will be kept). If the ``priority``
argument is a string, the priority name will be looked up in
:attr:`~scrapy.settings.SETTINGS_PRIORITIES`. Otherwise, a specific integer
should be provided.
Once the object is created, new settings can be loaded or updated with the
:meth:`~scrapy.settings.BaseSettings.set` method, and can be accessed with
the square bracket notation of dictionaries, or with the
:meth:`~scrapy.settings.BaseSettings.get` method of the instance and its
value conversion variants. When requesting a stored key, the value with the
highest priority will be retrieved.
"""
def __init__(self, values=None, priority='project'):
self.frozen = False
self.attributes = {}
self.update(values, priority)
def __getitem__(self, opt_name):
value = None
if opt_name in self:
value = self.attributes[opt_name].value
return value
def __contains__(self, name):
return name in self.attributes
def get(self, name, default=None):
"""
Get a setting value without affecting its original type.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return self[name] if self[name] is not None else default
def getbool(self, name, default=False):
"""
Get a setting value as a boolean.
``1``, ``'1'``, and ``True`` return ``True``, while ``0``, ``'0'``,
``False`` and ``None`` return ``False``.
For example, settings populated through environment variables set to
``'0'`` will return ``False`` when using this method.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return bool(int(self.get(name, default)))
def getint(self, name, default=0):
"""
Get a setting value as an int.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return int(self.get(name, default))
def getfloat(self, name, default=0.0):
"""
Get a setting value as a float.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
return float(self.get(name, default))
def getlist(self, name, default=None):
"""
        Get a setting value as a list. If the setting's original type is a list, a
copy of it will be returned. If it's a string it will be split by ",".
For example, settings populated through environment variables set to
``'one,two'`` will return a list ['one', 'two'] when using this method.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
value = self.get(name, default or [])
if isinstance(value, six.string_types):
value = value.split(',')
return list(value)
def getdict(self, name, default=None):
"""
        Get a setting value as a dictionary. If the setting's original type is a
dictionary, a copy of it will be returned. If it is a string it will be
evaluated as a JSON dictionary. In the case that it is a
:class:`~scrapy.settings.BaseSettings` instance itself, it will be
converted to a dictionary, containing all its current settings values
as they would be returned by :meth:`~scrapy.settings.BaseSettings.get`,
and losing all information about priority and mutability.
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
value = self.get(name, default or {})
if isinstance(value, six.string_types):
value = json.loads(value)
return dict(value)
def _getcomposite(self, name):
# DO NOT USE THIS FUNCTION IN YOUR CUSTOM PROJECTS
# It's for internal use in the transition away from the _BASE settings
# and will be removed along with _BASE support in a future release
basename = name + "_BASE"
if basename in self:
warnings.warn('_BASE settings are deprecated.',
category=ScrapyDeprecationWarning)
# When users defined a _BASE setting, they explicitly don't want to
# use any of Scrapy's defaults. Therefore, we only use these entries
# from self[name] (where the defaults now live) that have a priority
# higher than 'default'
compsett = BaseSettings(self[basename], priority='default')
for k in self[name]:
prio = self[name].getpriority(k)
if prio > get_settings_priority('default'):
compsett.set(k, self[name][k], prio)
return compsett
return self[name]
def getpriority(self, name):
"""
Return the current numerical priority value of a setting, or ``None`` if
the given ``name`` does not exist.
:param name: the setting name
:type name: string
"""
prio = None
if name in self:
prio = self.attributes[name].priority
return prio
def maxpriority(self):
"""
Return the numerical value of the highest priority present throughout
all settings, or the numerical value for ``default`` from
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` if there are no settings
stored.
"""
if len(self) > 0:
return max(self.getpriority(name) for name in self)
else:
return get_settings_priority('default')
def __setitem__(self, name, value):
self.set(name, value)
def set(self, name, value, priority='project'):
"""
Store a key/value attribute with a given priority.
Settings should be populated *before* configuring the Crawler object
(through the :meth:`~scrapy.crawler.Crawler.configure` method),
otherwise they won't have any effect.
:param name: the setting name
:type name: string
:param value: the value to associate with the setting
:type value: any
:param priority: the priority of the setting. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: string or int
"""
self._assert_mutability()
priority = get_settings_priority(priority)
if name not in self:
if isinstance(value, SettingsAttribute):
self.attributes[name] = value
else:
self.attributes[name] = SettingsAttribute(value, priority)
else:
self.attributes[name].set(value, priority)
def setdict(self, values, priority='project'):
self.update(values, priority)
def setmodule(self, module, priority='project'):
"""
Store settings from a module with a given priority.
This is a helper function that calls
:meth:`~scrapy.settings.BaseSettings.set` for every globally declared
uppercase variable of ``module`` with the provided ``priority``.
:param module: the module or the path of the module
:type module: module object or string
:param priority: the priority of the settings. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: string or int
"""
self._assert_mutability()
if isinstance(module, six.string_types):
module = import_module(module)
for key in dir(module):
if key.isupper():
self.set(key, getattr(module, key), priority)
def update(self, values, priority='project'):
"""
Store key/value pairs with a given priority.
This is a helper function that calls
:meth:`~scrapy.settings.BaseSettings.set` for every item of ``values``
with the provided ``priority``.
If ``values`` is a string, it is assumed to be JSON-encoded and parsed
into a dict with ``json.loads()`` first. If it is a
:class:`~scrapy.settings.BaseSettings` instance, the per-key priorities
will be used and the ``priority`` parameter ignored. This allows
inserting/updating settings with different priorities with a single
command.
:param values: the settings names and values
:type values: dict or string or :class:`~scrapy.settings.BaseSettings`
:param priority: the priority of the settings. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: string or int
"""
self._assert_mutability()
if isinstance(values, six.string_types):
values = json.loads(values)
if values is not None:
if isinstance(values, BaseSettings):
for name, value in six.iteritems(values):
self.set(name, value, values.getpriority(name))
else:
for name, value in six.iteritems(values):
self.set(name, value, priority)
def delete(self, name, priority='project'):
self._assert_mutability()
priority = get_settings_priority(priority)
if priority >= self.getpriority(name):
del self.attributes[name]
def __delitem__(self, name):
self._assert_mutability()
del self.attributes[name]
def _assert_mutability(self):
if self.frozen:
raise TypeError("Trying to modify an immutable Settings object")
def copy(self):
"""
Make a deep copy of current settings.
This method returns a new instance of the :class:`Settings` class,
populated with the same values and their priorities.
Modifications to the new object won't be reflected on the original
settings.
"""
return copy.deepcopy(self)
def freeze(self):
"""
Disable further changes to the current settings.
After calling this method, the present state of the settings will become
immutable. Trying to change values through the :meth:`~set` method and
its variants will raise a :exc:`TypeError`.
"""
self.frozen = True
def frozencopy(self):
"""
Return an immutable copy of the current settings.
Alias for a :meth:`~freeze` call in the object returned by :meth:`copy`.
"""
copy = self.copy()
copy.freeze()
return copy
def __iter__(self):
return iter(self.attributes)
def __len__(self):
return len(self.attributes)
def __str__(self):
return str(self.attributes)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.attributes)
@property
def overrides(self):
warnings.warn("`Settings.overrides` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='cmdline')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._overrides
except AttributeError:
self._overrides = o = _DictProxy(self, 'cmdline')
return o
@property
def defaults(self):
warnings.warn("`Settings.defaults` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='default')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._defaults
except AttributeError:
self._defaults = o = _DictProxy(self, 'default')
return o
class _DictProxy(MutableMapping):
def __init__(self, settings, priority):
self.o = {}
self.settings = settings
self.priority = priority
def __len__(self):
return len(self.o)
def __getitem__(self, k):
return self.o[k]
def __setitem__(self, k, v):
self.settings.set(k, v, priority=self.priority)
self.o[k] = v
def __delitem__(self, k):
del self.o[k]
def __iter__(self):
return iter(self.o)
class Settings(BaseSettings):
"""
This object stores Scrapy settings for the configuration of internal
components, and can be used for any further customization.
It is a direct subclass and supports all methods of
:class:`~scrapy.settings.BaseSettings`. Additionally, after instantiation
of this class, the new object will have the global default settings
described on :ref:`topics-settings-ref` already populated.
"""
def __init__(self, values=None, priority='project'):
# Do not pass kwarg values here. We don't want to promote user-defined
# dicts, and we want to update, not replace, default dicts with the
# values given by the user
super(Settings, self).__init__()
self.setmodule(default_settings, 'default')
# Promote default dictionaries to BaseSettings instances for per-key
# priorities
for name, val in six.iteritems(self):
if isinstance(val, dict):
self.set(name, BaseSettings(val, 'default'), 'default')
self.update(values, priority)
class CrawlerSettings(Settings):
def __init__(self, settings_module=None, **kw):
self.settings_module = settings_module
Settings.__init__(self, **kw)
def __getitem__(self, opt_name):
if opt_name in self.overrides:
return self.overrides[opt_name]
if self.settings_module and hasattr(self.settings_module, opt_name):
return getattr(self.settings_module, opt_name)
if opt_name in self.defaults:
return self.defaults[opt_name]
return Settings.__getitem__(self, opt_name)
def __str__(self):
return "<CrawlerSettings module=%r>" % self.settings_module
CrawlerSettings = create_deprecated_class(
'CrawlerSettings', CrawlerSettings,
new_class_path='scrapy.settings.Settings')
def iter_default_settings():
"""Return the default settings as an iterator of (name, value) tuples"""
for name in dir(default_settings):
if name.isupper():
yield name, getattr(default_settings, name)
def overridden_settings(settings):
"""Return a dict of the settings that have been overridden"""
for name, defvalue in iter_default_settings():
value = settings[name]
if not isinstance(defvalue, dict) and value != defvalue:
yield name, value
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-26-fixed/scrapy/scrapy/settings/__init__.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-26-buggy/scrapy/scrapy/settings/__init__.py |
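A minimal usage sketch for the settings API above (names and values are illustrative; assumes Scrapy is importable and that 'project' maps to the numeric priority 20 in SETTINGS_PRIORITIES):

from scrapy.settings import BaseSettings

settings = BaseSettings({'RETRY_TIMES': '3'}, priority='default')
settings.set('ALLOWED_HOSTS', 'example.com,example.org', priority='project')

assert settings.getint('RETRY_TIMES') == 3  # string coerced by getint()
assert settings.getlist('ALLOWED_HOSTS') == ['example.com', 'example.org']
assert settings.getpriority('ALLOWED_HOSTS') == 20  # numeric value of 'project'

frozen = settings.frozencopy()
try:
    frozen.set('RETRY_TIMES', 5)
except TypeError:
    pass  # _assert_mutability() rejects writes to a frozen copy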
scrapy-bug-33 | """
This is the Scrapy engine which controls the Scheduler, Downloader and Spiders.
For more information see docs/topics/architecture.rst
"""
import logging
from time import time
from twisted.internet import defer
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter
logger = logging.getLogger(__name__)
class Slot(object):
def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
self.closing = False
self.inprogress = set() # requests in progress
self.start_requests = iter(start_requests)
self.close_if_idle = close_if_idle
self.nextcall = nextcall
self.scheduler = scheduler
def add_request(self, request):
self.inprogress.add(request)
def remove_request(self, request):
self.inprogress.remove(request)
self._maybe_fire_closing()
def close(self):
self.closing = defer.Deferred()
self._maybe_fire_closing()
return self.closing
def _maybe_fire_closing(self):
if self.closing and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
self.closing.callback(None)
class ExecutionEngine(object):
def __init__(self, crawler, spider_closed_callback):
self.crawler = crawler
self.settings = crawler.settings
self.signals = crawler.signals
self.logformatter = crawler.logformatter
self.slot = None
self.spider = None
self.running = False
self.paused = False
self.scheduler_cls = load_object(self.settings['SCHEDULER'])
downloader_cls = load_object(self.settings['DOWNLOADER'])
self.downloader = downloader_cls(crawler)
self.scraper = Scraper(crawler)
self._spider_closed_callback = spider_closed_callback
@defer.inlineCallbacks
def start(self):
"""Start the execution engine"""
assert not self.running, "Engine already running"
self.start_time = time()
yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
self.running = True
self._closewait = defer.Deferred()
yield self._closewait
def stop(self):
"""Stop the execution engine gracefully"""
assert self.running, "Engine not running"
self.running = False
dfd = self._close_all_spiders()
return dfd.addBoth(lambda _: self._finish_stopping_engine())
def pause(self):
"""Pause the execution engine"""
self.paused = True
def unpause(self):
"""Resume the execution engine"""
self.paused = False
def _next_request(self, spider):
slot = self.slot
if not slot:
return
if self.paused:
slot.nextcall.schedule(5)
return
while not self._needs_backout(spider):
if not self._next_request_from_scheduler(spider):
break
if slot.start_requests and not self._needs_backout(spider):
try:
request = next(slot.start_requests)
except StopIteration:
slot.start_requests = None
except Exception:
slot.start_requests = None
logger.error('Error while obtaining start requests',
exc_info=True, extra={'spider': spider})
else:
self.crawl(request, spider)
if self.spider_is_idle(spider) and slot.close_if_idle:
self._spider_idle(spider)
def _needs_backout(self, spider):
slot = self.slot
return not self.running \
or slot.closing \
or self.downloader.needs_backout() \
or self.scraper.slot.needs_backout()
def _next_request_from_scheduler(self, spider):
slot = self.slot
request = slot.scheduler.next_request()
if not request:
return
d = self._download(request, spider)
d.addBoth(self._handle_downloader_output, request, spider)
d.addErrback(lambda f: logger.info('Error while handling downloader output',
extra={'spider': spider, 'failure': f}))
d.addBoth(lambda _: slot.remove_request(request))
d.addErrback(lambda f: logger.info('Error while removing request from slot',
extra={'spider': spider, 'failure': f}))
d.addBoth(lambda _: slot.nextcall.schedule())
d.addErrback(lambda f: logger.info('Error while scheduling new request',
extra={'spider': spider, 'failure': f}))
return d
def _handle_downloader_output(self, response, request, spider):
assert isinstance(response, (Request, Response, Failure)), response
# downloader middleware can return requests (for example, redirects)
if isinstance(response, Request):
self.crawl(response, spider)
return
# response is a Response or Failure
d = self.scraper.enqueue_scrape(response, request, spider)
d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
extra={'spider': spider, 'failure': f}))
return d
def spider_is_idle(self, spider):
scraper_idle = self.scraper.slot.is_idle()
pending = self.slot.scheduler.has_pending_requests()
downloading = bool(self.downloader.active)
pending_start_requests = self.slot.start_requests is not None
idle = scraper_idle and not (pending or downloading or pending_start_requests)
return idle
@property
def open_spiders(self):
return [self.spider] if self.spider else []
def has_capacity(self):
"""Does the engine have capacity to handle more spiders"""
return not bool(self.slot)
def crawl(self, request, spider):
assert spider in self.open_spiders, \
"Spider %r not opened when crawling: %s" % (spider.name, request)
self.schedule(request, spider)
self.slot.nextcall.schedule()
def schedule(self, request, spider):
self.signals.send_catch_log(signal=signals.request_scheduled,
request=request, spider=spider)
if not self.slot.scheduler.enqueue_request(request):
self.signals.send_catch_log(signal=signals.request_dropped,
request=request, spider=spider)
def download(self, request, spider):
slot = self.slot
slot.add_request(request)
d = self._download(request, spider)
d.addBoth(self._downloaded, slot, request, spider)
return d
def _downloaded(self, response, slot, request, spider):
slot.remove_request(request)
return self.download(response, spider) \
if isinstance(response, Request) else response
def _download(self, request, spider):
slot = self.slot
slot.add_request(request)
def _on_success(response):
assert isinstance(response, (Response, Request))
if isinstance(response, Response):
response.request = request # tie request to response received
logkws = self.logformatter.crawled(request, response, spider)
logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
self.signals.send_catch_log(signal=signals.response_received, \
response=response, request=request, spider=spider)
return response
def _on_complete(_):
slot.nextcall.schedule()
return _
dwld = self.downloader.fetch(request, spider)
dwld.addCallbacks(_on_success)
dwld.addBoth(_on_complete)
return dwld
@defer.inlineCallbacks
def open_spider(self, spider, start_requests=(), close_if_idle=True):
assert self.has_capacity(), "No free spider slot when opening %r" % \
spider.name
logger.info("Spider opened", extra={'spider': spider})
nextcall = CallLaterOnce(self._next_request, spider)
scheduler = self.scheduler_cls.from_crawler(self.crawler)
start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
self.slot = slot
self.spider = spider
yield scheduler.open(spider)
yield self.scraper.open_spider(spider)
self.crawler.stats.open_spider(spider)
yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
slot.nextcall.schedule()
def _spider_idle(self, spider):
"""Called when a spider gets idle. This function is called when there
are no remaining pages to download or schedule. It can be called
multiple times. If some extension raises a DontCloseSpider exception
(in the spider_idle signal handler) the spider is not closed until the
next loop and this function is guaranteed to be called (at least) once
again for this spider.
"""
res = self.signals.send_catch_log(signal=signals.spider_idle, \
spider=spider, dont_log=DontCloseSpider)
if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
for _, x in res):
self.slot.nextcall.schedule(5)
return
if self.spider_is_idle(spider):
self.close_spider(spider, reason='finished')
def close_spider(self, spider, reason='cancelled'):
"""Close (cancel) spider and clear all its outstanding requests"""
slot = self.slot
if slot.closing:
return slot.closing
logger.info("Closing spider (%(reason)s)",
{'reason': reason},
extra={'spider': spider})
dfd = slot.close()
def log_failure(msg):
def errback(failure):
logger.error(msg, extra={'spider': spider, 'failure': failure})
return errback
dfd.addBoth(lambda _: self.downloader.close())
dfd.addErrback(log_failure('Downloader close failure'))
dfd.addBoth(lambda _: self.scraper.close_spider(spider))
dfd.addErrback(log_failure('Scraper close failure'))
dfd.addBoth(lambda _: slot.scheduler.close(reason))
dfd.addErrback(log_failure('Scheduler close failure'))
dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
signal=signals.spider_closed, spider=spider, reason=reason))
dfd.addErrback(log_failure('Error while sending spider_close signal'))
dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
dfd.addErrback(log_failure('Stats close failure'))
dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
{'reason': reason},
extra={'spider': spider}))
dfd.addBoth(lambda _: setattr(self, 'slot', None))
dfd.addErrback(log_failure('Error while unassigning slot'))
dfd.addBoth(lambda _: setattr(self, 'spider', None))
dfd.addErrback(log_failure('Error while unassigning spider'))
dfd.addBoth(lambda _: self._spider_closed_callback(spider))
return dfd
def _close_all_spiders(self):
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
dlist = defer.DeferredList(dfds)
return dlist
@defer.inlineCallbacks
def _finish_stopping_engine(self):
yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
self._closewait.callback(None)
"""
This is the Scrapy engine which controls the Scheduler, Downloader and Spiders.
For more information see docs/topics/architecture.rst
"""
import logging
from time import time
from twisted.internet import defer
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter, failure_to_exc_info
logger = logging.getLogger(__name__)
class Slot(object):
def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
self.closing = False
self.inprogress = set() # requests in progress
self.start_requests = iter(start_requests)
self.close_if_idle = close_if_idle
self.nextcall = nextcall
self.scheduler = scheduler
def add_request(self, request):
self.inprogress.add(request)
def remove_request(self, request):
self.inprogress.remove(request)
self._maybe_fire_closing()
def close(self):
self.closing = defer.Deferred()
self._maybe_fire_closing()
return self.closing
def _maybe_fire_closing(self):
if self.closing and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
self.closing.callback(None)
class ExecutionEngine(object):
def __init__(self, crawler, spider_closed_callback):
self.crawler = crawler
self.settings = crawler.settings
self.signals = crawler.signals
self.logformatter = crawler.logformatter
self.slot = None
self.spider = None
self.running = False
self.paused = False
self.scheduler_cls = load_object(self.settings['SCHEDULER'])
downloader_cls = load_object(self.settings['DOWNLOADER'])
self.downloader = downloader_cls(crawler)
self.scraper = Scraper(crawler)
self._spider_closed_callback = spider_closed_callback
@defer.inlineCallbacks
def start(self):
"""Start the execution engine"""
assert not self.running, "Engine already running"
self.start_time = time()
yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
self.running = True
self._closewait = defer.Deferred()
yield self._closewait
def stop(self):
"""Stop the execution engine gracefully"""
assert self.running, "Engine not running"
self.running = False
dfd = self._close_all_spiders()
return dfd.addBoth(lambda _: self._finish_stopping_engine())
def pause(self):
"""Pause the execution engine"""
self.paused = True
def unpause(self):
"""Resume the execution engine"""
self.paused = False
def _next_request(self, spider):
slot = self.slot
if not slot:
return
if self.paused:
slot.nextcall.schedule(5)
return
while not self._needs_backout(spider):
if not self._next_request_from_scheduler(spider):
break
if slot.start_requests and not self._needs_backout(spider):
try:
request = next(slot.start_requests)
except StopIteration:
slot.start_requests = None
except Exception:
slot.start_requests = None
logger.error('Error while obtaining start requests',
exc_info=True, extra={'spider': spider})
else:
self.crawl(request, spider)
if self.spider_is_idle(spider) and slot.close_if_idle:
self._spider_idle(spider)
def _needs_backout(self, spider):
slot = self.slot
return not self.running \
or slot.closing \
or self.downloader.needs_backout() \
or self.scraper.slot.needs_backout()
def _next_request_from_scheduler(self, spider):
slot = self.slot
request = slot.scheduler.next_request()
if not request:
return
d = self._download(request, spider)
d.addBoth(self._handle_downloader_output, request, spider)
d.addErrback(lambda f: logger.info('Error while handling downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.remove_request(request))
d.addErrback(lambda f: logger.info('Error while removing request from slot',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.nextcall.schedule())
d.addErrback(lambda f: logger.info('Error while scheduling new request',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def _handle_downloader_output(self, response, request, spider):
assert isinstance(response, (Request, Response, Failure)), response
# downloader middleware can return requests (for example, redirects)
if isinstance(response, Request):
self.crawl(response, spider)
return
# response is a Response or Failure
d = self.scraper.enqueue_scrape(response, request, spider)
d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def spider_is_idle(self, spider):
scraper_idle = self.scraper.slot.is_idle()
pending = self.slot.scheduler.has_pending_requests()
downloading = bool(self.downloader.active)
pending_start_requests = self.slot.start_requests is not None
idle = scraper_idle and not (pending or downloading or pending_start_requests)
return idle
@property
def open_spiders(self):
return [self.spider] if self.spider else []
def has_capacity(self):
"""Does the engine have capacity to handle more spiders"""
return not bool(self.slot)
def crawl(self, request, spider):
assert spider in self.open_spiders, \
"Spider %r not opened when crawling: %s" % (spider.name, request)
self.schedule(request, spider)
self.slot.nextcall.schedule()
def schedule(self, request, spider):
self.signals.send_catch_log(signal=signals.request_scheduled,
request=request, spider=spider)
if not self.slot.scheduler.enqueue_request(request):
self.signals.send_catch_log(signal=signals.request_dropped,
request=request, spider=spider)
def download(self, request, spider):
slot = self.slot
slot.add_request(request)
d = self._download(request, spider)
d.addBoth(self._downloaded, slot, request, spider)
return d
def _downloaded(self, response, slot, request, spider):
slot.remove_request(request)
return self.download(response, spider) \
if isinstance(response, Request) else response
def _download(self, request, spider):
slot = self.slot
slot.add_request(request)
def _on_success(response):
assert isinstance(response, (Response, Request))
if isinstance(response, Response):
response.request = request # tie request to response received
logkws = self.logformatter.crawled(request, response, spider)
logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
self.signals.send_catch_log(signal=signals.response_received, \
response=response, request=request, spider=spider)
return response
def _on_complete(_):
slot.nextcall.schedule()
return _
dwld = self.downloader.fetch(request, spider)
dwld.addCallbacks(_on_success)
dwld.addBoth(_on_complete)
return dwld
@defer.inlineCallbacks
def open_spider(self, spider, start_requests=(), close_if_idle=True):
assert self.has_capacity(), "No free spider slot when opening %r" % \
spider.name
logger.info("Spider opened", extra={'spider': spider})
nextcall = CallLaterOnce(self._next_request, spider)
scheduler = self.scheduler_cls.from_crawler(self.crawler)
start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
self.slot = slot
self.spider = spider
yield scheduler.open(spider)
yield self.scraper.open_spider(spider)
self.crawler.stats.open_spider(spider)
yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
slot.nextcall.schedule()
def _spider_idle(self, spider):
"""Called when a spider gets idle. This function is called when there
are no remaining pages to download or schedule. It can be called
multiple times. If some extension raises a DontCloseSpider exception
(in the spider_idle signal handler) the spider is not closed until the
next loop and this function is guaranteed to be called (at least) once
again for this spider.
"""
res = self.signals.send_catch_log(signal=signals.spider_idle, \
spider=spider, dont_log=DontCloseSpider)
if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
for _, x in res):
self.slot.nextcall.schedule(5)
return
if self.spider_is_idle(spider):
self.close_spider(spider, reason='finished')
def close_spider(self, spider, reason='cancelled'):
"""Close (cancel) spider and clear all its outstanding requests"""
slot = self.slot
if slot.closing:
return slot.closing
logger.info("Closing spider (%(reason)s)",
{'reason': reason},
extra={'spider': spider})
dfd = slot.close()
def log_failure(msg):
def errback(failure):
logger.error(
msg,
exc_info=failure_to_exc_info(failure),
extra={'spider': spider}
)
return errback
dfd.addBoth(lambda _: self.downloader.close())
dfd.addErrback(log_failure('Downloader close failure'))
dfd.addBoth(lambda _: self.scraper.close_spider(spider))
dfd.addErrback(log_failure('Scraper close failure'))
dfd.addBoth(lambda _: slot.scheduler.close(reason))
dfd.addErrback(log_failure('Scheduler close failure'))
dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
signal=signals.spider_closed, spider=spider, reason=reason))
dfd.addErrback(log_failure('Error while sending spider_close signal'))
dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
dfd.addErrback(log_failure('Stats close failure'))
dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
{'reason': reason},
extra={'spider': spider}))
dfd.addBoth(lambda _: setattr(self, 'slot', None))
dfd.addErrback(log_failure('Error while unassigning slot'))
dfd.addBoth(lambda _: setattr(self, 'spider', None))
dfd.addErrback(log_failure('Error while unassigning spider'))
dfd.addBoth(lambda _: self._spider_closed_callback(spider))
return dfd
def _close_all_spiders(self):
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
dlist = defer.DeferredList(dfds)
return dlist
@defer.inlineCallbacks
def _finish_stopping_engine(self):
yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
self._closewait.callback(None)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-33-fixed/scrapy/scrapy/core/engine.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-33-buggy/scrapy/scrapy/core/engine.py |
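The two engine copies above differ only in how Twisted Failure objects reach the logging calls; a minimal sketch of the failure_to_exc_info pattern (assumes Twisted and Scrapy are importable):

import logging

from twisted.python.failure import Failure

from scrapy.utils.log import failure_to_exc_info

logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError:
    f = Failure()  # captures the currently active exception

# failure_to_exc_info() turns the Failure into the (type, value, traceback)
# tuple that stdlib logging expects, so the traceback is rendered instead of
# the Failure being silently dropped as an unused 'failure' extra key.
logger.error('Error while handling downloader output',
             exc_info=failure_to_exc_info(f))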
scrapy-bug-17 | """
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from scrapy.utils.python import to_bytes, to_native_str
from w3lib import html
from scrapy.utils.decorators import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.text[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url,
response.encoding)
return _baseurl_cache[response]
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
text = response.text[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url,
response.encoding)
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
>>> response_status_message(200)
'200 OK'
>>> response_status_message(404)
'404 Not Found'
"""
return '%s %s' % (status, to_native_str(http.RESPONSES.get(int(status))))
def response_httprepr(response):
"""Return raw HTTP representation (as bytes) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = b"HTTP/1.1 " + to_bytes(str(response.status)) + b" " + \
to_bytes(http.RESPONSES.get(response.status, b'')) + b"\r\n"
if response.headers:
s += response.headers.to_string() + b"\r\n"
s += b"\r\n"
s += response.body
return s
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = '<head><base href="%s">' % response.url
body = body.replace(b'<head>', to_bytes(repl))
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" %
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
"""
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from scrapy.utils.python import to_bytes, to_native_str
from w3lib import html
from scrapy.utils.decorators import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.text[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url,
response.encoding)
return _baseurl_cache[response]
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
text = response.text[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url,
response.encoding)
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
"""
return '%s %s' % (status, to_native_str(http.RESPONSES.get(int(status), "Unknown Status")))
def response_httprepr(response):
"""Return raw HTTP representation (as bytes) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = b"HTTP/1.1 " + to_bytes(str(response.status)) + b" " + \
to_bytes(http.RESPONSES.get(response.status, b'')) + b"\r\n"
if response.headers:
s += response.headers.to_string() + b"\r\n"
s += b"\r\n"
s += response.body
return s
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = '<head><base href="%s">' % response.url
body = body.replace(b'<head>', to_bytes(repl))
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" %
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-17-fixed/scrapy/scrapy/utils/response.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-17-buggy/scrapy/scrapy/utils/response.py |
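The only functional change between the two copies above is the fallback for status codes missing from twisted's http.RESPONSES; a quick check against the fixed behaviour (assumes Scrapy is importable):

from scrapy.utils.response import response_status_message

assert response_status_message(200) == '200 OK'
assert response_status_message(404) == '404 Not Found'
# 573 is not in http.RESPONSES; without the "Unknown Status" default,
# to_native_str(None) would raise a TypeError here.
assert response_status_message(573) == '573 Unknown Status'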
scrapy-bug-21 | """
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
"""
import logging
from six.moves.urllib import robotparser
from twisted.internet.defer import Deferred, maybeDeferred
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
class RobotsTxtMiddleware(object):
DOWNLOAD_PRIORITY = 1000
def __init__(self, crawler):
if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
raise NotConfigured
self.crawler = crawler
self._useragent = crawler.settings.get('USER_AGENT')
self._parsers = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
if request.meta.get('dont_obey_robotstxt'):
return
d = maybeDeferred(self.robot_parser, request, spider)
d.addCallback(self.process_request_2, request, spider)
return d
def process_request_2(self, rp, request, spider):
if rp is not None and not rp.can_fetch(self._useragent, request.url):
logger.debug("Forbidden by robots.txt: %(request)s",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest()
def robot_parser(self, request, spider):
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = Deferred()
robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
robotsreq = Request(
robotsurl,
priority=self.DOWNLOAD_PRIORITY,
meta={'dont_obey_robotstxt': True}
)
dfd = self.crawler.engine.download(robotsreq, spider)
dfd.addCallback(self._parse_robots, netloc)
dfd.addErrback(self._logerror, robotsreq, spider)
dfd.addErrback(self._robots_error, netloc)
if isinstance(self._parsers[netloc], Deferred):
d = Deferred()
def cb(result):
d.callback(result)
return result
self._parsers[netloc].addCallback(cb)
return d
else:
return self._parsers[netloc]
def _logerror(self, failure, request, spider):
if failure.type is not IgnoreRequest:
logger.error("Error downloading %(request)s: %(f_exception)s",
{'request': request, 'f_exception': failure.value},
exc_info=failure_to_exc_info(failure),
extra={'spider': spider})
return failure
def _parse_robots(self, response, netloc):
rp = robotparser.RobotFileParser(response.url)
body = ''
if hasattr(response, 'text'):
body = response.text
else: # last effort try
try:
body = response.body.decode('utf-8')
except UnicodeDecodeError:
# If we found garbage, disregard it,
# but keep the lookup cached (in self._parsers)
# Running rp.parse() will set rp state from
# 'disallow all' to 'allow any'.
pass
rp.parse(body.splitlines())
rp_dfd = self._parsers[netloc]
self._parsers[netloc] = rp
rp_dfd.callback(rp)
def _robots_error(self, failure, netloc):
self._parsers.pop(netloc).callback(None)
"""
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
"""
import logging
from six.moves.urllib import robotparser
from twisted.internet.defer import Deferred, maybeDeferred
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
class RobotsTxtMiddleware(object):
DOWNLOAD_PRIORITY = 1000
def __init__(self, crawler):
if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
raise NotConfigured
self.crawler = crawler
self._useragent = crawler.settings.get('USER_AGENT')
self._parsers = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
if request.meta.get('dont_obey_robotstxt'):
return
d = maybeDeferred(self.robot_parser, request, spider)
d.addCallback(self.process_request_2, request, spider)
return d
def process_request_2(self, rp, request, spider):
if rp is not None and not rp.can_fetch(self._useragent, request.url):
logger.debug("Forbidden by robots.txt: %(request)s",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest()
def robot_parser(self, request, spider):
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = Deferred()
robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
robotsreq = Request(
robotsurl,
priority=self.DOWNLOAD_PRIORITY,
meta={'dont_obey_robotstxt': True}
)
dfd = self.crawler.engine.download(robotsreq, spider)
dfd.addCallback(self._parse_robots, netloc)
dfd.addErrback(self._logerror, robotsreq, spider)
dfd.addErrback(self._robots_error, netloc)
if isinstance(self._parsers[netloc], Deferred):
d = Deferred()
def cb(result):
d.callback(result)
return result
self._parsers[netloc].addCallback(cb)
return d
else:
return self._parsers[netloc]
def _logerror(self, failure, request, spider):
if failure.type is not IgnoreRequest:
logger.error("Error downloading %(request)s: %(f_exception)s",
{'request': request, 'f_exception': failure.value},
exc_info=failure_to_exc_info(failure),
extra={'spider': spider})
return failure
def _parse_robots(self, response, netloc):
rp = robotparser.RobotFileParser(response.url)
body = ''
if hasattr(response, 'text'):
body = response.text
else: # last effort try
try:
body = response.body.decode('utf-8')
except UnicodeDecodeError:
# If we found garbage, disregard it,
# but keep the lookup cached (in self._parsers)
# Running rp.parse() will set rp state from
# 'disallow all' to 'allow any'.
pass
rp.parse(body.splitlines())
rp_dfd = self._parsers[netloc]
self._parsers[netloc] = rp
rp_dfd.callback(rp)
def _robots_error(self, failure, netloc):
rp_dfd = self._parsers[netloc]
self._parsers[netloc] = None
rp_dfd.callback(None)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-21-fixed/scrapy/scrapy/downloadermiddlewares/robotstxt.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-21-buggy/scrapy/scrapy/downloadermiddlewares/robotstxt.py |
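A stripped-down, runnable sketch of the per-netloc caching pattern used by robot_parser() above: a Deferred is cached while the fetch is pending, waiters chain onto it, and the resolved value then replaces the Deferred in the cache. Here lookup() and resolve() are hypothetical stand-ins for robot_parser() and its download callbacks (assumes Twisted is importable):

from twisted.internet.defer import Deferred

_cache = {}

def lookup(key):
    # First caller installs a placeholder Deferred; later callers get a new
    # Deferred chained onto it, mirroring robot_parser() above.
    if key not in _cache:
        _cache[key] = Deferred()
    entry = _cache[key]
    if isinstance(entry, Deferred):
        waiter = Deferred()
        def _cb(result):
            waiter.callback(result)
            return result
        entry.addCallback(_cb)
        return waiter
    return entry  # already resolved: the plain value

def resolve(key, value):
    # Swap the placeholder for the final value *before* firing it, so
    # callbacks that re-enter lookup() see the resolved entry.
    pending = _cache[key]
    _cache[key] = value
    pending.callback(value)

d = lookup('example.com')  # fetch pending: a chained Deferred
d.addCallback(lambda rp: print('got', rp))
resolve('example.com', 'parsed-robots')  # prints: got parsed-robots
assert lookup('example.com') == 'parsed-robots'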
scrapy-bug-19 | import time
from six.moves.http_cookiejar import (
CookieJar as _CookieJar, DefaultCookiePolicy, IPV4_RE
)
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_native_str
class CookieJar(object):
def __init__(self, policy=None, check_expired_frequency=10000):
self.policy = policy or DefaultCookiePolicy()
self.jar = _CookieJar(self.policy)
self.jar._cookies_lock = _DummyLock()
self.check_expired_frequency = check_expired_frequency
self.processed = 0
def extract_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.extract_cookies(wrsp, wreq)
def add_cookie_header(self, request):
wreq = WrappedRequest(request)
self.policy._now = self.jar._now = int(time.time())
# the cookiejar implementation iterates through all domains
# instead we restrict to potential matches on the domain
req_host = urlparse_cached(request).hostname
if not req_host:
return
if not IPV4_RE.search(req_host):
hosts = potential_domain_matches(req_host)
if '.' not in req_host:
hosts += [req_host + ".local"]
else:
hosts = [req_host]
cookies = []
for host in hosts:
if host in self.jar._cookies:
cookies += self.jar._cookies_for_domain(host, wreq)
attrs = self.jar._cookie_attrs(cookies)
if attrs:
if not wreq.has_header("Cookie"):
wreq.add_unredirected_header("Cookie", "; ".join(attrs))
self.processed += 1
if self.processed % self.check_expired_frequency == 0:
# This is still quite inefficient for large number of cookies
self.jar.clear_expired_cookies()
@property
def _cookies(self):
return self.jar._cookies
def clear_session_cookies(self, *args, **kwargs):
return self.jar.clear_session_cookies(*args, **kwargs)
def clear(self):
return self.jar.clear()
def __iter__(self):
return iter(self.jar)
def __len__(self):
return len(self.jar)
def set_policy(self, pol):
return self.jar.set_policy(pol)
def make_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.make_cookies(wrsp, wreq)
def set_cookie(self, cookie):
self.jar.set_cookie(cookie)
def set_cookie_if_ok(self, cookie, request):
self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))
def potential_domain_matches(domain):
"""Potential domain matches for a cookie
>>> potential_domain_matches('www.example.com')
['www.example.com', 'example.com', '.www.example.com', '.example.com']
"""
matches = [domain]
try:
start = domain.index('.') + 1
end = domain.rindex('.')
while start < end:
matches.append(domain[start:])
start = domain.index('.', start) + 1
except ValueError:
pass
return matches + ['.' + d for d in matches]
class _DummyLock(object):
def acquire(self):
pass
def release(self):
pass
class WrappedRequest(object):
"""Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class
see http://docs.python.org/library/urllib2.html#urllib2.Request
"""
def __init__(self, request):
self.request = request
def get_full_url(self):
return self.request.url
def get_host(self):
return urlparse_cached(self.request).netloc
def get_type(self):
return urlparse_cached(self.request).scheme
def is_unverifiable(self):
"""Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.
It defaults to False. An unverifiable request is one whose URL the user did not have the
option to approve. For example, if the request is for an image in an
HTML document, and the user had no option to approve the automatic
fetching of the image, this should be true.
"""
return self.request.meta.get('is_unverifiable', False)
# python3 uses request.unverifiable
@property
def unverifiable(self):
return self.is_unverifiable()
def get_origin_req_host(self):
return urlparse_cached(self.request).hostname
def has_header(self, name):
return name in self.request.headers
def get_header(self, name, default=None):
return to_native_str(self.request.headers.get(name, default),
errors='replace')
def header_items(self):
return [
(to_native_str(k, errors='replace'),
[to_native_str(x, errors='replace') for x in v])
for k, v in self.request.headers.items()
]
def add_unredirected_header(self, name, value):
self.request.headers.appendlist(name, value)
class WrappedResponse(object):
def __init__(self, response):
self.response = response
def info(self):
return self
# python3 cookiejars calls get_all
def get_all(self, name, default=None):
return [to_native_str(v, errors='replace')
for v in self.response.headers.getlist(name)]
# python2 cookiejars calls getheaders
getheaders = get_all
import time
from six.moves.http_cookiejar import (
CookieJar as _CookieJar, DefaultCookiePolicy, IPV4_RE
)
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_native_str
class CookieJar(object):
def __init__(self, policy=None, check_expired_frequency=10000):
self.policy = policy or DefaultCookiePolicy()
self.jar = _CookieJar(self.policy)
self.jar._cookies_lock = _DummyLock()
self.check_expired_frequency = check_expired_frequency
self.processed = 0
def extract_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.extract_cookies(wrsp, wreq)
def add_cookie_header(self, request):
wreq = WrappedRequest(request)
self.policy._now = self.jar._now = int(time.time())
# the cookiejar implementation iterates through all domains
# instead we restrict to potential matches on the domain
req_host = urlparse_cached(request).hostname
if not req_host:
return
if not IPV4_RE.search(req_host):
hosts = potential_domain_matches(req_host)
if '.' not in req_host:
hosts += [req_host + ".local"]
else:
hosts = [req_host]
cookies = []
for host in hosts:
if host in self.jar._cookies:
cookies += self.jar._cookies_for_domain(host, wreq)
attrs = self.jar._cookie_attrs(cookies)
if attrs:
if not wreq.has_header("Cookie"):
wreq.add_unredirected_header("Cookie", "; ".join(attrs))
self.processed += 1
if self.processed % self.check_expired_frequency == 0:
# This is still quite inefficient for large number of cookies
self.jar.clear_expired_cookies()
@property
def _cookies(self):
return self.jar._cookies
def clear_session_cookies(self, *args, **kwargs):
return self.jar.clear_session_cookies(*args, **kwargs)
def clear(self):
return self.jar.clear()
def __iter__(self):
return iter(self.jar)
def __len__(self):
return len(self.jar)
def set_policy(self, pol):
return self.jar.set_policy(pol)
def make_cookies(self, response, request):
wreq = WrappedRequest(request)
wrsp = WrappedResponse(response)
return self.jar.make_cookies(wrsp, wreq)
def set_cookie(self, cookie):
self.jar.set_cookie(cookie)
def set_cookie_if_ok(self, cookie, request):
self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))
def potential_domain_matches(domain):
"""Potential domain matches for a cookie
>>> potential_domain_matches('www.example.com')
['www.example.com', 'example.com', '.www.example.com', '.example.com']
"""
matches = [domain]
try:
start = domain.index('.') + 1
end = domain.rindex('.')
while start < end:
matches.append(domain[start:])
start = domain.index('.', start) + 1
except ValueError:
pass
return matches + ['.' + d for d in matches]
class _DummyLock(object):
def acquire(self):
pass
def release(self):
pass
class WrappedRequest(object):
"""Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class
see http://docs.python.org/library/urllib2.html#urllib2.Request
"""
def __init__(self, request):
self.request = request
def get_full_url(self):
return self.request.url
def get_host(self):
return urlparse_cached(self.request).netloc
def get_type(self):
return urlparse_cached(self.request).scheme
def is_unverifiable(self):
"""Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.
It defaults to False. An unverifiable request is one whose URL the user did not have the
option to approve. For example, if the request is for an image in an
HTML document, and the user had no option to approve the automatic
fetching of the image, this should be true.
"""
return self.request.meta.get('is_unverifiable', False)
def get_origin_req_host(self):
return urlparse_cached(self.request).hostname
# python3 uses attributes instead of methods
@property
def full_url(self):
return self.get_full_url()
@property
def host(self):
return self.get_host()
@property
def type(self):
return self.get_type()
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
def has_header(self, name):
return name in self.request.headers
def get_header(self, name, default=None):
return to_native_str(self.request.headers.get(name, default),
errors='replace')
def header_items(self):
return [
(to_native_str(k, errors='replace'),
[to_native_str(x, errors='replace') for x in v])
for k, v in self.request.headers.items()
]
def add_unredirected_header(self, name, value):
self.request.headers.appendlist(name, value)
class WrappedResponse(object):
def __init__(self, response):
self.response = response
def info(self):
return self
# python3 cookiejars calls get_all
def get_all(self, name, default=None):
return [to_native_str(v, errors='replace')
for v in self.response.headers.getlist(name)]
# python2 cookiejars calls getheaders
getheaders = get_all
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-19-fixed/scrapy/scrapy/http/cookies.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-19-buggy/scrapy/scrapy/http/cookies.py |
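A round-trip sketch for the CookieJar wrapper above, relying on the python3 request attributes added in the fixed copy (assumes Scrapy is importable; the exact header bytes assume the default cookie policy):

from scrapy.http import Request, Response
from scrapy.http.cookies import CookieJar

jar = CookieJar()
req = Request('http://www.example.com/')
res = Response('http://www.example.com/',
               headers={'Set-Cookie': 'session=abc123; Path=/'})

jar.extract_cookies(res, req)  # stores the cookie for www.example.com

follow_up = Request('http://www.example.com/next')
jar.add_cookie_header(follow_up)
assert follow_up.headers.get('Cookie') == b'session=abc123'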
scrapy-bug-37 | """
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
import six
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
class Request(object_ref):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
if callback is not None and not callable(callback):
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
assert callback or not errback, "Cannot use errback without a callback"
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
@property
def cb_kwargs(self):
if self._cb_kwargs is None:
self._cb_kwargs = {}
return self._cb_kwargs
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_url(self):
return self._url
def _set_url(self, url):
if not isinstance(url, six.string_types):
raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
s = safe_url_string(url, self.encoding)
self._url = escape_ajax(s)
if ':' not in self._url:
raise ValueError('Missing scheme in request url: %s' % self._url)
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
else:
self._body = to_bytes(body, self.encoding)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
@property
def encoding(self):
return self._encoding
def __str__(self):
return "<%s %s>" % (self.method, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
@classmethod
def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
"""Create a Request object from a string containing a `cURL
<https://curl.haxx.se/>`_ command. It populates the HTTP method, the
URL, the headers, the cookies and the body. It accepts the same
arguments as the :class:`Request` class, taking precedence over and
overriding the values of the same arguments contained in the cURL
command.
Unrecognized options are ignored by default. To raise an error when
finding unknown options call this method by passing
``ignore_unknown_options=False``.
.. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
subclasses, such as :class:`~scrapy.http.JSONRequest`, or
:class:`~scrapy.http.XmlRpcRequest`, as well as having
:ref:`downloader middlewares <topics-downloader-middleware>`
and
:ref:`spider middlewares <topics-spider-middleware>`
enabled, such as
:class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
:class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
or
:class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
may modify the :class:`~scrapy.http.Request` object.
"""
request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
request_kwargs.update(kwargs)
return cls(**request_kwargs)
"""
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
import six
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
class Request(object_ref):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
if callback is not None and not callable(callback):
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
assert callback or not errback, "Cannot use errback without a callback"
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
@property
def cb_kwargs(self):
if self._cb_kwargs is None:
self._cb_kwargs = {}
return self._cb_kwargs
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_url(self):
return self._url
def _set_url(self, url):
if not isinstance(url, six.string_types):
raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
s = safe_url_string(url, self.encoding)
self._url = escape_ajax(s)
if ('://' not in self._url) and (not self._url.startswith('data:')):
raise ValueError('Missing scheme in request url: %s' % self._url)
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
else:
self._body = to_bytes(body, self.encoding)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
@property
def encoding(self):
return self._encoding
def __str__(self):
return "<%s %s>" % (self.method, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
@classmethod
def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
"""Create a Request object from a string containing a `cURL
<https://curl.haxx.se/>`_ command. It populates the HTTP method, the
URL, the headers, the cookies and the body. It accepts the same
arguments as the :class:`Request` class, taking precedence over and
overriding the values of the same arguments contained in the cURL
command.
Unrecognized options are ignored by default. To raise an error when
finding unknown options call this method by passing
``ignore_unknown_options=False``.
.. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
subclasses, such as :class:`~scrapy.http.JSONRequest`, or
:class:`~scrapy.http.XmlRpcRequest`, as well as having
:ref:`downloader middlewares <topics-downloader-middleware>`
and
:ref:`spider middlewares <topics-spider-middleware>`
enabled, such as
:class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
:class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
or
:class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
may modify the :class:`~scrapy.http.Request` object.
"""
request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
request_kwargs.update(kwargs)
return cls(**request_kwargs)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-37-fixed/scrapy/scrapy/http/request/__init__.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-37-buggy/scrapy/scrapy/http/request/__init__.py |
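The scheme check is the difference between the two Request copies above; a quick sketch against the fixed version's rule (assumes Scrapy is importable):

from scrapy.http import Request

Request('http://example.com/')  # accepted by both copies
Request('data:,Hello%20World')  # accepted by the fixed copy via the explicit data: carve-out

try:
    # A bare colon satisfied the old ``':' not in url`` test, so scheme-less
    # URLs like this one slipped through; the '://' check rejects them.
    Request('example.com/search?t=12:30')
except ValueError as e:
    print(e)  # Missing scheme in request url: example.com/search?t=12:30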
scrapy-bug-10 | import logging
from six.moves.urllib.parse import urljoin
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.utils.python import to_native_str
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
{'reason': reason, 'redirected': redirected, 'request': request},
extra={'spider': spider})
return redirected
else:
logger.debug("Discarding %(request)s: max redirections reached",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest("max redirections reached")
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
"""Handle redirection of requests based on response status and meta-refresh html tag"""
def process_response(self, request, response, spider):
if (request.meta.get('dont_redirect', False) or
response.status in getattr(spider, 'handle_httpstatus_list', []) or
response.status in request.meta.get('handle_httpstatus_list', []) or
request.meta.get('handle_httpstatus_all', False)):
return response
allowed_status = (301, 302, 303, 307)
if 'Location' not in response.headers or response.status not in allowed_status:
return response
# HTTP header is ascii or latin1, redirected url will be percent-encoded utf-8
location = to_native_str(response.headers['location'].decode('latin1'))
redirected_url = urljoin(request.url, location)
if response.status in (301, 307) or request.method == 'HEAD':
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
if isinstance(response, HtmlResponse):
interval, url = get_meta_refresh(response)
if url and interval < self._maxdelay:
redirected = self._redirect_request_using_get(request, url)
return self._redirect(redirected, request, spider, 'meta refresh')
return response
import logging
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
{'reason': reason, 'redirected': redirected, 'request': request},
extra={'spider': spider})
return redirected
else:
logger.debug("Discarding %(request)s: max redirections reached",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest("max redirections reached")
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
"""Handle redirection of requests based on response status and meta-refresh html tag"""
def process_response(self, request, response, spider):
if (request.meta.get('dont_redirect', False) or
response.status in getattr(spider, 'handle_httpstatus_list', []) or
response.status in request.meta.get('handle_httpstatus_list', []) or
request.meta.get('handle_httpstatus_all', False)):
return response
allowed_status = (301, 302, 303, 307)
if 'Location' not in response.headers or response.status not in allowed_status:
return response
location = safe_url_string(response.headers['location'])
redirected_url = urljoin(request.url, location)
if response.status in (301, 307) or request.method == 'HEAD':
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
if isinstance(response, HtmlResponse):
interval, url = get_meta_refresh(response)
if url and interval < self._maxdelay:
redirected = self._redirect_request_using_get(request, url)
return self._redirect(redirected, request, spider, 'meta refresh')
return response
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-10-fixed/scrapy/scrapy/downloadermiddlewares/redirect.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-10-buggy/scrapy/scrapy/downloadermiddlewares/redirect.py |
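A small sketch of the redirect URL resolution both versions above perform: the Location header value is joined against the original request URL with urljoin; the URLs are hypothetical.
from six.moves.urllib.parse import urljoin
redirected_url = urljoin('http://example.com/a/b', '/login')
assert redirected_url == 'http://example.com/login'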
scrapy-bug-23 | import base64
from six.moves.urllib.request import getproxies, proxy_bypass
from six.moves.urllib.parse import unquote
try:
from urllib2 import _parse_proxy
except ImportError:
from urllib.request import _parse_proxy
from six.moves.urllib.parse import urlunparse
from scrapy.utils.httpobj import urlparse_cached
from scrapy.exceptions import NotConfigured
class HttpProxyMiddleware(object):
def __init__(self):
self.proxies = {}
for type, url in getproxies().items():
self.proxies[type] = self._get_proxy(url, type)
if not self.proxies:
raise NotConfigured
def _get_proxy(self, url, orig_type):
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
if user:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
else:
creds = None
return creds, proxy_url
def process_request(self, request, spider):
# ignore if proxy is already set

if 'proxy' in request.meta:
return
parsed = urlparse_cached(request)
scheme = parsed.scheme
# 'no_proxy' is only supported by http schemes
if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):
return
if scheme in self.proxies:
self._set_proxy(request, scheme)
def _set_proxy(self, request, scheme):
creds, proxy = self.proxies[scheme]
request.meta['proxy'] = proxy
if creds:
request.headers['Proxy-Authorization'] = 'Basic ' + creds
import base64
from six.moves.urllib.request import getproxies, proxy_bypass
from six.moves.urllib.parse import unquote
try:
from urllib2 import _parse_proxy
except ImportError:
from urllib.request import _parse_proxy
from six.moves.urllib.parse import urlunparse
from scrapy.utils.httpobj import urlparse_cached
from scrapy.exceptions import NotConfigured
from scrapy.utils.python import to_bytes
class HttpProxyMiddleware(object):
def __init__(self):
self.proxies = {}
for type, url in getproxies().items():
self.proxies[type] = self._get_proxy(url, type)
if not self.proxies:
raise NotConfigured
def _get_proxy(self, url, orig_type):
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
if user:
user_pass = to_bytes('%s:%s' % (unquote(user), unquote(password)))
creds = base64.b64encode(user_pass).strip()
else:
creds = None
return creds, proxy_url
def process_request(self, request, spider):
# ignore if proxy is already set
if 'proxy' in request.meta:
return
parsed = urlparse_cached(request)
scheme = parsed.scheme
# 'no_proxy' is only supported by http schemes
if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):
return
if scheme in self.proxies:
self._set_proxy(request, scheme)
def _set_proxy(self, request, scheme):
creds, proxy = self.proxies[scheme]
request.meta['proxy'] = proxy
if creds:
request.headers['Proxy-Authorization'] = b'Basic ' + creds
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-23-fixed/scrapy/scrapy/downloadermiddlewares/httpproxy.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-23-buggy/scrapy/scrapy/downloadermiddlewares/httpproxy.py |
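A short sketch of the Python 3 pitfall behind the to_bytes call above: base64.b64encode accepts only bytes, so the credentials string must be encoded first; the username and password are hypothetical.
import base64
user_pass = 'user:pass'
creds = base64.b64encode(user_pass.encode('utf-8')).strip()
assert creds == b'dXNlcjpwYXNz'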
scrapy-bug-1 | """
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware(object):
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_spider_output(self, response, result, spider):
for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug(
"Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider):
regex = self.host_regex
# hostname can be None for wrong urls (like javascript links)
host = urlparse_cached(request).hostname or ''
return bool(regex.search(host))
def get_host_regex(self, spider):
"""Override this method to implement a different offsite policy"""
allowed_domains = getattr(spider, 'allowed_domains', None)
if not allowed_domains:
return re.compile('') # allow all by default
url_pattern = re.compile("^https?://.*$")
for domain in allowed_domains:
if url_pattern.match(domain):
message = ("allowed_domains accepts only domains, not URLs. "
"Ignoring URL entry %s in allowed_domains." % domain)
warnings.warn(message, URLWarning)
domains = [re.escape(d) for d in allowed_domains if d is not None]
regex = r'^(.*\.)?(%s)$' % '|'.join(domains)
return re.compile(regex)
def spider_opened(self, spider):
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set()
class URLWarning(Warning):
pass
"""
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware(object):
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_spider_output(self, response, result, spider):
for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug(
"Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider):
regex = self.host_regex
# hostname can be None for wrong urls (like javascript links)
host = urlparse_cached(request).hostname or ''
return bool(regex.search(host))
def get_host_regex(self, spider):
"""Override this method to implement a different offsite policy"""
allowed_domains = getattr(spider, 'allowed_domains', None)
if not allowed_domains:
return re.compile('') # allow all by default
url_pattern = re.compile("^https?://.*$")
domains = []
for domain in allowed_domains:
if domain is None:
continue
elif url_pattern.match(domain):
message = ("allowed_domains accepts only domains, not URLs. "
"Ignoring URL entry %s in allowed_domains." % domain)
warnings.warn(message, URLWarning)
else:
domains.append(re.escape(domain))
regex = r'^(.*\.)?(%s)$' % '|'.join(domains)
return re.compile(regex)
def spider_opened(self, spider):
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set()
class URLWarning(Warning):
pass
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-1-fixed/scrapy/scrapy/spidermiddlewares/offsite.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-1-buggy/scrapy/scrapy/spidermiddlewares/offsite.py |
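A minimal sketch of the host regex that get_host_regex above builds from allowed_domains: it matches the domain itself and any of its subdomains; the domain and hostnames are hypothetical.
import re
regex = re.compile(r'^(.*\.)?(%s)$' % '|'.join([re.escape('example.com')]))
assert regex.search('example.com')
assert regex.search('sub.example.com')
assert not regex.search('example.com.evil.org')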
scrapy-bug-7 | """
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
import six
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
if formcss is not None:
from parsel.csstranslator import HTMLTranslator
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
return urljoin(form.base_url, form.action)
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
root = create_root_node(response.text, lxml.html.HTMLParser,
base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
# Get form element from xpath, if not found, go up
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
raise ValueError('No <form> element found with %s' % encoded)
# If we get here, it means that either formname was None
# or invalid
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[not(@type) or @type['
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
' and (../@checked or'
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={
"re": "http://exslt.org/regular-expressions"})
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
if clickable and clickable[0] not in formdata and not clickable[0] is None:
values.append(clickable)
values.extend(formdata.items())
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
# And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
# This is a workaround for a bug in lxml fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [
el for el in form.xpath(
'descendant::*[(self::input or self::button)'
' and re:test(@type, "^submit$", "i")]'
'|descendant::button[not(@type)]',
namespaces={"re": "http://exslt.org/regular-expressions"})
]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
"""
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
if formcss is not None:
from parsel.csstranslator import HTMLTranslator
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
action = form.get('action')
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
root = create_root_node(response.text, lxml.html.HTMLParser,
base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
# Get form element from xpath, if not found, go up
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
raise ValueError('No <form> element found with %s' % encoded)
# If we get here, it means that either formname was None
# or invalid
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[not(@type) or @type['
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
' and (../@checked or'
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={
"re": "http://exslt.org/regular-expressions"})
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
if clickable and clickable[0] not in formdata and not clickable[0] is None:
values.append(clickable)
values.extend(formdata.items())
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
# And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
# This is a workaround for a bug in lxml fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [
el for el in form.xpath(
'descendant::*[(self::input or self::button)'
' and re:test(@type, "^submit$", "i")]'
'|descendant::button[not(@type)]',
namespaces={"re": "http://exslt.org/regular-expressions"})
]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-7-fixed/scrapy/scrapy/http/request/form.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-7-buggy/scrapy/scrapy/http/request/form.py |
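A minimal usage sketch of the FormRequest class defined above, assuming a standard Scrapy installation; the URL and form field are hypothetical.
from scrapy.http import FormRequest
request = FormRequest('http://example.com/post', formdata={'name': 'value'})
assert request.method == 'POST'
assert request.body == b'name=value'
assert request.headers[b'Content-Type'] == b'application/x-www-form-urlencoded'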
scrapy-bug-18 | """
This module implements a class which returns the appropriate Response class
based on different criteria.
"""
from __future__ import absolute_import
from mimetypes import MimeTypes
from pkgutil import get_data
from io import StringIO
import six
from scrapy.http import Response
from scrapy.utils.misc import load_object
from scrapy.utils.python import isbinarytext, to_bytes, to_native_str
class ResponseTypes(object):
CLASSES = {
'text/html': 'scrapy.http.HtmlResponse',
'application/atom+xml': 'scrapy.http.XmlResponse',
'application/rdf+xml': 'scrapy.http.XmlResponse',
'application/rss+xml': 'scrapy.http.XmlResponse',
'application/xhtml+xml': 'scrapy.http.HtmlResponse',
'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
'application/xml': 'scrapy.http.XmlResponse',
'application/json': 'scrapy.http.TextResponse',
'application/x-json': 'scrapy.http.TextResponse',
'application/javascript': 'scrapy.http.TextResponse',
'application/x-javascript': 'scrapy.http.TextResponse',
'text/xml': 'scrapy.http.XmlResponse',
'text/*': 'scrapy.http.TextResponse',
}
def __init__(self):
self.classes = {}
self.mimetypes = MimeTypes()
mimedata = get_data('scrapy', 'mime.types').decode('utf8')
self.mimetypes.readfp(StringIO(mimedata))
for mimetype, cls in six.iteritems(self.CLASSES):
self.classes[mimetype] = load_object(cls)
def from_mimetype(self, mimetype):
"""Return the most appropriate Response class for the given mimetype"""
if mimetype is None:
return Response
elif mimetype in self.classes:
return self.classes[mimetype]
else:
basetype = "%s/*" % mimetype.split('/')[0]
return self.classes.get(basetype, Response)
def from_content_type(self, content_type, content_encoding=None):
"""Return the most appropriate Response class from an HTTP Content-Type
header """
if content_encoding:
return Response
mimetype = to_native_str(content_type).split(';')[0].strip().lower()
return self.from_mimetype(mimetype)
def from_content_disposition(self, content_disposition):
try:
filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
filename = filename.strip('"\'')
return self.from_filename(filename)
except IndexError:
return Response
def from_headers(self, headers):
"""Return the most appropriate Response class by looking at the HTTP
headers"""
cls = Response
if b'Content-Type' in headers:
cls = self.from_content_type(
content_type=headers[b'Content-type'],
content_encoding=headers.get(b'Content-Encoding')
)
if cls is Response and b'Content-Disposition' in headers:
cls = self.from_content_disposition(headers[b'Content-Disposition'])
return cls
def from_filename(self, filename):
"""Return the most appropriate Response class from a file name"""
mimetype, encoding = self.mimetypes.guess_type(filename)
if mimetype and not encoding:
return self.from_mimetype(mimetype)
else:
return Response
def from_body(self, body):
"""Try to guess the appropriate response based on the body content.
This method is a bit magic and could be improved in the future, but
it's not meant to be used except for special cases where response types
cannot be guessed using more straightforward methods."""
chunk = body[:5000]
chunk = to_bytes(chunk)
if isbinarytext(chunk):
return self.from_mimetype('application/octet-stream')
elif b"<html>" in chunk.lower():
return self.from_mimetype('text/html')
elif b"<?xml" in chunk.lower():
return self.from_mimetype('text/xml')
else:
return self.from_mimetype('text')
def from_args(self, headers=None, url=None, filename=None, body=None):
"""Guess the most appropriate Response class based on
the given arguments."""
cls = Response
if headers is not None:
cls = self.from_headers(headers)
if cls is Response and url is not None:
cls = self.from_filename(url)
if cls is Response and filename is not None:
cls = self.from_filename(filename)
if cls is Response and body is not None:
cls = self.from_body(body)
return cls
responsetypes = ResponseTypes()
"""
This module implements a class which returns the appropriate Response class
based on different criteria.
"""
from __future__ import absolute_import
from mimetypes import MimeTypes
from pkgutil import get_data
from io import StringIO
import six
from scrapy.http import Response
from scrapy.utils.misc import load_object
from scrapy.utils.python import isbinarytext, to_bytes, to_native_str
class ResponseTypes(object):
CLASSES = {
'text/html': 'scrapy.http.HtmlResponse',
'application/atom+xml': 'scrapy.http.XmlResponse',
'application/rdf+xml': 'scrapy.http.XmlResponse',
'application/rss+xml': 'scrapy.http.XmlResponse',
'application/xhtml+xml': 'scrapy.http.HtmlResponse',
'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
'application/xml': 'scrapy.http.XmlResponse',
'application/json': 'scrapy.http.TextResponse',
'application/x-json': 'scrapy.http.TextResponse',
'application/javascript': 'scrapy.http.TextResponse',
'application/x-javascript': 'scrapy.http.TextResponse',
'text/xml': 'scrapy.http.XmlResponse',
'text/*': 'scrapy.http.TextResponse',
}
def __init__(self):
self.classes = {}
self.mimetypes = MimeTypes()
mimedata = get_data('scrapy', 'mime.types').decode('utf8')
self.mimetypes.readfp(StringIO(mimedata))
for mimetype, cls in six.iteritems(self.CLASSES):
self.classes[mimetype] = load_object(cls)
def from_mimetype(self, mimetype):
"""Return the most appropriate Response class for the given mimetype"""
if mimetype is None:
return Response
elif mimetype in self.classes:
return self.classes[mimetype]
else:
basetype = "%s/*" % mimetype.split('/')[0]
return self.classes.get(basetype, Response)
def from_content_type(self, content_type, content_encoding=None):
"""Return the most appropriate Response class from an HTTP Content-Type
header """
if content_encoding:
return Response
mimetype = to_native_str(content_type).split(';')[0].strip().lower()
return self.from_mimetype(mimetype)
def from_content_disposition(self, content_disposition):
try:
filename = to_native_str(content_disposition,
encoding='latin-1', errors='replace').split(';')[1].split('=')[1]
filename = filename.strip('"\'')
return self.from_filename(filename)
except IndexError:
return Response
def from_headers(self, headers):
"""Return the most appropriate Response class by looking at the HTTP
headers"""
cls = Response
if b'Content-Type' in headers:
cls = self.from_content_type(
content_type=headers[b'Content-type'],
content_encoding=headers.get(b'Content-Encoding')
)
if cls is Response and b'Content-Disposition' in headers:
cls = self.from_content_disposition(headers[b'Content-Disposition'])
return cls
def from_filename(self, filename):
"""Return the most appropriate Response class from a file name"""
mimetype, encoding = self.mimetypes.guess_type(filename)
if mimetype and not encoding:
return self.from_mimetype(mimetype)
else:
return Response
def from_body(self, body):
"""Try to guess the appropriate response based on the body content.
This method is a bit magic and could be improved in the future, but
it's not meant to be used except for special cases where response types
cannot be guessed using more straightforward methods."""
chunk = body[:5000]
chunk = to_bytes(chunk)
if isbinarytext(chunk):
return self.from_mimetype('application/octet-stream')
elif b"<html>" in chunk.lower():
return self.from_mimetype('text/html')
elif b"<?xml" in chunk.lower():
return self.from_mimetype('text/xml')
else:
return self.from_mimetype('text')
def from_args(self, headers=None, url=None, filename=None, body=None):
"""Guess the most appropriate Response class based on
the given arguments."""
cls = Response
if headers is not None:
cls = self.from_headers(headers)
if cls is Response and url is not None:
cls = self.from_filename(url)
if cls is Response and filename is not None:
cls = self.from_filename(filename)
if cls is Response and body is not None:
cls = self.from_body(body)
return cls
responsetypes = ResponseTypes()
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-18-fixed/scrapy/scrapy/responsetypes.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-18-buggy/scrapy/scrapy/responsetypes.py |
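A short sketch of the Content-Type dispatch implemented above, using the module-level responsetypes singleton; the header value is hypothetical.
from scrapy.responsetypes import responsetypes
from scrapy.http import HtmlResponse
cls = responsetypes.from_content_type('text/html; charset=utf-8')
assert cls is HtmlResponse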
scrapy-bug-13 | """
Images Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from PIL import Image
from scrapy.utils.misc import md5sum
from scrapy.utils.python import to_bytes
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
#TODO: from scrapy.pipelines.media import MediaPipeline
from scrapy.pipelines.files import FileException, FilesPipeline
class NoimagesDrop(DropItem):
"""Product with no images exception"""
class ImageException(FileException):
"""General image error exception"""
class ImagesPipeline(FilesPipeline):
"""Abstract pipeline that implement the image thumbnail generation logic
"""
MEDIA_NAME = 'image'
# Uppercase attributes kept for backward compatibility with code that subclasses
# ImagesPipeline. They may be overridden by settings.
MIN_WIDTH = 0
MIN_HEIGHT = 0
EXPIRES = 0
THUMBS = {}
DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
DEFAULT_IMAGES_RESULT_FIELD = 'images'
def __init__(self, store_uri, download_func=None, settings=None):
super(ImagesPipeline, self).__init__(store_uri, settings=settings,
download_func=download_func)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
resolve = functools.partial(self._key_for_pipe,
base_class_name="ImagesPipeline")
self.expires = settings.getint(
resolve("IMAGES_EXPIRES"), self.EXPIRES
)
if not hasattr(self, "IMAGES_RESULT_FIELD"):
self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
if not hasattr(self, "IMAGES_URLS_FIELD"):
self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
self.images_urls_field = settings.get(
resolve('IMAGES_URLS_FIELD'),
self.IMAGES_URLS_FIELD
)
self.images_result_field = settings.get(
resolve('IMAGES_RESULT_FIELD'),
self.IMAGES_RESULT_FIELD
)
self.min_width = settings.getint(
resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
)
self.min_height = settings.getint(
resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
)
self.thumbs = settings.get(
resolve('IMAGES_THUMBS'), self.THUMBS
)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
store_uri = settings['IMAGES_STORE']
return cls(store_uri, settings=settings)
def file_downloaded(self, response, request, info):
return self.image_downloaded(response, request, info)
def image_downloaded(self, response, request, info):
checksum = None
for path, image, buf in self.get_images(response, request, info):
if checksum is None:
buf.seek(0)
checksum = md5sum(buf)
width, height = image.size
self.store.persist_file(
path, buf, info,
meta={'width': width, 'height': height},
headers={'Content-Type': 'image/jpeg'})
return checksum
def get_images(self, response, request, info):
path = self.file_path(request, response=response, info=info)
orig_image = Image.open(BytesIO(response.body))
width, height = orig_image.size
if width < self.min_width or height < self.min_height:
raise ImageException("Image too small (%dx%d < %dx%d)" %
(width, height, self.min_width, self.min_height))
image, buf = self.convert_image(orig_image)
yield path, image, buf
for thumb_id, size in six.iteritems(self.thumbs):
thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)
thumb_image, thumb_buf = self.convert_image(image, size)
yield thumb_path, thumb_image, thumb_buf
def convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
background = Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
image = image.convert('RGB')
if size:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
buf = BytesIO()
image.save(buf, 'JPEG')
return image, buf
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.images_urls_field, [])]
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.images_result_field in item.fields:
item[self.images_result_field] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
'please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from image_key or file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() or image_key() methods have been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
## end of deprecation warning block
image_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'full/%s.jpg' % (image_guid)
def thumb_path(self, request, thumb_id, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.thumb_key(url) method is deprecated, please use '
'thumb_path(request, thumb_id, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from thumb_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if thumb_key() method has been overridden
if not hasattr(self.thumb_key, '_base'):
_warn()
return self.thumb_key(url, thumb_id)
## end of deprecation warning block
thumb_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'thumbs/%s/%s.jpg' % (thumb_id, thumb_guid)
# deprecated
def file_key(self, url):
return self.image_key(url)
file_key._base = True
# deprecated
def image_key(self, url):
return self.file_path(url)
image_key._base = True
# deprecated
def thumb_key(self, url, thumb_id):
return self.thumb_path(url, thumb_id)
thumb_key._base = True
"""
Images Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from PIL import Image
from scrapy.utils.misc import md5sum
from scrapy.utils.python import to_bytes
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
#TODO: from scrapy.pipelines.media import MediaPipeline
from scrapy.pipelines.files import FileException, FilesPipeline
class NoimagesDrop(DropItem):
"""Product with no images exception"""
class ImageException(FileException):
"""General image error exception"""
class ImagesPipeline(FilesPipeline):
"""Abstract pipeline that implement the image thumbnail generation logic
"""
MEDIA_NAME = 'image'
# Uppercase attributes kept for backward compatibility with code that subclasses
# ImagesPipeline. They may be overridden by settings.
MIN_WIDTH = 0
MIN_HEIGHT = 0
EXPIRES = 90
THUMBS = {}
DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
DEFAULT_IMAGES_RESULT_FIELD = 'images'
def __init__(self, store_uri, download_func=None, settings=None):
super(ImagesPipeline, self).__init__(store_uri, settings=settings,
download_func=download_func)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
resolve = functools.partial(self._key_for_pipe,
base_class_name="ImagesPipeline")
self.expires = settings.getint(
resolve("IMAGES_EXPIRES"), self.EXPIRES
)
if not hasattr(self, "IMAGES_RESULT_FIELD"):
self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
if not hasattr(self, "IMAGES_URLS_FIELD"):
self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
self.images_urls_field = settings.get(
resolve('IMAGES_URLS_FIELD'),
self.IMAGES_URLS_FIELD
)
self.images_result_field = settings.get(
resolve('IMAGES_RESULT_FIELD'),
self.IMAGES_RESULT_FIELD
)
self.min_width = settings.getint(
resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
)
self.min_height = settings.getint(
resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
)
self.thumbs = settings.get(
resolve('IMAGES_THUMBS'), self.THUMBS
)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
store_uri = settings['IMAGES_STORE']
return cls(store_uri, settings=settings)
def file_downloaded(self, response, request, info):
return self.image_downloaded(response, request, info)
def image_downloaded(self, response, request, info):
checksum = None
for path, image, buf in self.get_images(response, request, info):
if checksum is None:
buf.seek(0)
checksum = md5sum(buf)
width, height = image.size
self.store.persist_file(
path, buf, info,
meta={'width': width, 'height': height},
headers={'Content-Type': 'image/jpeg'})
return checksum
def get_images(self, response, request, info):
path = self.file_path(request, response=response, info=info)
orig_image = Image.open(BytesIO(response.body))
width, height = orig_image.size
if width < self.min_width or height < self.min_height:
raise ImageException("Image too small (%dx%d < %dx%d)" %
(width, height, self.min_width, self.min_height))
image, buf = self.convert_image(orig_image)
yield path, image, buf
for thumb_id, size in six.iteritems(self.thumbs):
thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)
thumb_image, thumb_buf = self.convert_image(image, size)
yield thumb_path, thumb_image, thumb_buf
def convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
background = Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
image = image.convert('RGB')
if size:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
buf = BytesIO()
image.save(buf, 'JPEG')
return image, buf
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.images_urls_field, [])]
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.images_result_field in item.fields:
item[self.images_result_field] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
'please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from image_key or file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() or image_key() methods have been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
## end of deprecation warning block
image_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'full/%s.jpg' % (image_guid)
def thumb_path(self, request, thumb_id, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.thumb_key(url) method is deprecated, please use '
'thumb_path(request, thumb_id, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from thumb_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if thumb_key() method has been overridden
if not hasattr(self.thumb_key, '_base'):
_warn()
return self.thumb_key(url, thumb_id)
## end of deprecation warning block
thumb_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'thumbs/%s/%s.jpg' % (thumb_id, thumb_guid)
# deprecated
def file_key(self, url):
return self.image_key(url)
file_key._base = True
# deprecated
def image_key(self, url):
return self.file_path(url)
image_key._base = True
# deprecated
def thumb_key(self, url, thumb_id):
return self.thumb_path(url, thumb_id)
thumb_key._base = True
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-13-fixed/scrapy/scrapy/pipelines/images.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-13-buggy/scrapy/scrapy/pipelines/images.py |
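A minimal sketch of the SHA1-based naming scheme used by file_path above; the image URL is hypothetical.
import hashlib
url = 'http://example.com/a.jpg'
image_guid = hashlib.sha1(url.encode('utf-8')).hexdigest()
path = 'full/%s.jpg' % image_guid  # 'full/' plus a 40-character hex digest
assert len(image_guid) == 40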
scrapy-bug-20 | import re
import logging
import six
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
logger = logging.getLogger(__name__)
class SitemapSpider(Spider):
sitemap_urls = ()
sitemap_rules = [('', 'parse')]
sitemap_follow = ['']
sitemap_alternate_links = False
def __init__(self, *a, **kw):
super(SitemapSpider, self).__init__(*a, **kw)
self._cbs = []
for r, c in self.sitemap_rules:
if isinstance(c, six.string_types):
c = getattr(self, c)
self._cbs.append((regex(r), c))
self._follow = [regex(x) for x in self.sitemap_follow]
def start_requests(self):
for url in self.sitemap_urls:
yield Request(url, self._parse_sitemap)
def _parse_sitemap(self, response):
if response.url.endswith('/robots.txt'):
for url in sitemap_urls_from_robots(response.body):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning("Ignoring invalid sitemap: %(response)s",
{'response': response}, extra={'spider': self})
return
s = Sitemap(body)
if s.type == 'sitemapindex':
for loc in iterloc(s, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == 'urlset':
for loc in iterloc(s):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
def _get_sitemap_body(self, response):
"""Return the sitemap body contained in the given response,
or None if the response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
elif is_gzipped(response):
return gunzip(response.body)
elif response.url.endswith('.xml'):
return response.body
elif response.url.endswith('.xml.gz'):
return gunzip(response.body)
def regex(x):
if isinstance(x, six.string_types):
return re.compile(x)
return x
def iterloc(it, alt=False):
for d in it:
yield d['loc']
# Also consider alternate URLs (xhtml:link rel="alternate")
if alt and 'alternate' in d:
for l in d['alternate']:
yield l
import re
import logging
import six
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
logger = logging.getLogger(__name__)
class SitemapSpider(Spider):
sitemap_urls = ()
sitemap_rules = [('', 'parse')]
sitemap_follow = ['']
sitemap_alternate_links = False
def __init__(self, *a, **kw):
super(SitemapSpider, self).__init__(*a, **kw)
self._cbs = []
for r, c in self.sitemap_rules:
if isinstance(c, six.string_types):
c = getattr(self, c)
self._cbs.append((regex(r), c))
self._follow = [regex(x) for x in self.sitemap_follow]
def start_requests(self):
for url in self.sitemap_urls:
yield Request(url, self._parse_sitemap)
def _parse_sitemap(self, response):
if response.url.endswith('/robots.txt'):
for url in sitemap_urls_from_robots(response.text):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning("Ignoring invalid sitemap: %(response)s",
{'response': response}, extra={'spider': self})
return
s = Sitemap(body)
if s.type == 'sitemapindex':
for loc in iterloc(s, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == 'urlset':
for loc in iterloc(s):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
def _get_sitemap_body(self, response):
"""Return the sitemap body contained in the given response,
or None if the response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
elif is_gzipped(response):
return gunzip(response.body)
elif response.url.endswith('.xml'):
return response.body
elif response.url.endswith('.xml.gz'):
return gunzip(response.body)
def regex(x):
if isinstance(x, six.string_types):
return re.compile(x)
return x
def iterloc(it, alt=False):
for d in it:
yield d['loc']
# Also consider alternate URLs (xhtml:link rel="alternate")
if alt and 'alternate' in d:
for l in d['alternate']:
yield l
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-20-fixed/scrapy/scrapy/spiders/sitemap.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-20-buggy/scrapy/scrapy/spiders/sitemap.py |
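A small sketch of the sitemap_rules dispatch above: each (pattern, callback) pair is compiled with regex() and the first pattern that matches a URL wins; the rules and URL are hypothetical.
import re
rules = [('/product/', 'parse_product'), ('', 'parse')]
compiled = [(re.compile(r), c) for r, c in rules]
loc = 'http://example.com/product/1'
callback = next(c for r, c in compiled if r.search(loc))
assert callback == 'parse_product'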
scrapy-bug-2 | """
This module contains data types used by Scrapy which are not included in the
Python Standard Library.
This module must not depend on any module outside the Standard Library.
"""
import copy
import collections
import warnings
import six
from scrapy.exceptions import ScrapyDeprecationWarning
if six.PY2:
Mapping = collections.Mapping
else:
Mapping = collections.abc.Mapping
class MultiValueDictKeyError(KeyError):
def __init__(self, *args, **kwargs):
warnings.warn(
"scrapy.utils.datatypes.MultiValueDictKeyError is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2
)
super(MultiValueDictKeyError, self).__init__(*args, **kwargs)
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
warnings.warn("scrapy.utils.datatypes.MultiValueDict is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2)
dict.__init__(self, key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, dict.__repr__(self))
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = dict.__getitem__(self, key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
dict.__setitem__(self, key, [value])
def __copy__(self):
return self.__class__(dict.items(self))
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def get(self, key, default=None):
"Returns the default value if the requested data doesn't exist"
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"Returns an empty list if the requested data doesn't exist"
try:
return dict.__getitem__(self, key)
except KeyError:
return []
def setlist(self, key, list_):
dict.__setitem__(self, key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"Appends an item to the internal list associated with key"
self.setlistdefault(key, [])
dict.__setitem__(self, key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def lists(self):
"Returns a list of (key, list) pairs."
return dict.items(self)
def values(self):
"Returns a list of the last value on every key list."
return [self[key] for key in self.keys()]
def copy(self):
"Returns a copy of this object."
return self.__deepcopy__()
def update(self, *args, **kwargs):
"update() extends rather than replaces existing key lists. Also accepts keyword args."
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key, []).append(value)
class SiteNode(object):
"""Class to represent a site node (page, image or any other file)"""
def __init__(self, url):
warnings.warn(
"scrapy.utils.datatypes.SiteNode is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2
)
self.url = url
self.itemnames = []
self.children = []
self.parent = None
def add_child(self, node):
self.children.append(node)
node.parent = self
def to_string(self, level=0):
s = "%s%s\n" % (' '*level, self.url)
if self.itemnames:
for n in self.itemnames:
s += "%sScraped: %s\n" % (' '*(level+1), n)
for node in self.children:
s += node.to_string(level+1)
return s
class CaselessDict(dict):
__slots__ = ()
def __init__(self, seq=None):
super(CaselessDict, self).__init__()
if seq:
self.update(seq)
def __getitem__(self, key):
return dict.__getitem__(self, self.normkey(key))
def __setitem__(self, key, value):
dict.__setitem__(self, self.normkey(key), self.normvalue(value))
def __delitem__(self, key):
dict.__delitem__(self, self.normkey(key))
def __contains__(self, key):
return dict.__contains__(self, self.normkey(key))
has_key = __contains__
def __copy__(self):
return self.__class__(self)
copy = __copy__
def normkey(self, key):
"""Method to normalize dictionary key access"""
return key.lower()
def normvalue(self, value):
"""Method to normalize values prior to be setted"""
return value
def get(self, key, def_val=None):
return dict.get(self, self.normkey(key), self.normvalue(def_val))
def setdefault(self, key, def_val=None):
return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))
def update(self, seq):
seq = seq.items() if isinstance(seq, Mapping) else seq
iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)
super(CaselessDict, self).update(iseq)
@classmethod
def fromkeys(cls, keys, value=None):
return cls((k, value) for k in keys)
def pop(self, key, *args):
return dict.pop(self, self.normkey(key), *args)
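A minimal sketch of CaselessDict's case-insensitive access via normkey() (illustrative header names; assumes the class and the Mapping alias defined in this module):

# Sketch: every key passes through normkey(), which lowercases it.
headers = CaselessDict({'Content-Type': 'text/html'})
assert headers['content-type'] == 'text/html'
assert 'CONTENT-TYPE' in headers
headers['Content-Length'] = '120'
assert headers.get('content-length') == '120'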
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
if not six.PY2:
warnings.warn(
"scrapy.utils.datatypes.MergeDict is deprecated in favor "
"of collections.ChainMap (introduced in Python 3.3)",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def items(self):
item_list = []
for dict_ in self.dicts:
item_list.extend(dict_.items())
return item_list
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
class LocalCache(collections.OrderedDict):
"""Dictionary with a finite number of keys.
    Older items expire first.
"""
def __init__(self, limit=None):
super(LocalCache, self).__init__()
self.limit = limit
def __setitem__(self, key, value):
while len(self) >= self.limit:
self.popitem(last=False)
super(LocalCache, self).__setitem__(key, value)
class SequenceExclude(object):
"""Object to test if an item is NOT within some sequence."""
def __init__(self, seq):
self.seq = seq
def __contains__(self, item):
return item not in self.seq
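A minimal sketch of LocalCache eviction, and of the failure mode this record captures when limit keeps its default of None and the while-comparison above runs unguarded (illustrative keys):

# Sketch: the oldest entry is evicted once the limit is reached.
cache = LocalCache(limit=2)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3  # evicts 'a', the oldest entry
assert list(cache) == ['b', 'c']

unbounded = LocalCache()  # limit defaults to None
try:
    unbounded['x'] = 1  # TypeError on Python 3: len(self) >= None
except TypeError:
    pass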
"""
This module contains data types used by Scrapy which are not included in the
Python Standard Library.
This module must not depend on any module outside the Standard Library.
"""
import copy
import collections
import warnings
import six
from scrapy.exceptions import ScrapyDeprecationWarning
if six.PY2:
Mapping = collections.Mapping
else:
Mapping = collections.abc.Mapping
class MultiValueDictKeyError(KeyError):
def __init__(self, *args, **kwargs):
warnings.warn(
"scrapy.utils.datatypes.MultiValueDictKeyError is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2
)
super(MultiValueDictKeyError, self).__init__(*args, **kwargs)
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
warnings.warn("scrapy.utils.datatypes.MultiValueDict is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2)
dict.__init__(self, key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, dict.__repr__(self))
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = dict.__getitem__(self, key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
dict.__setitem__(self, key, [value])
def __copy__(self):
return self.__class__(dict.items(self))
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def get(self, key, default=None):
"Returns the default value if the requested data doesn't exist"
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"Returns an empty list if the requested data doesn't exist"
try:
return dict.__getitem__(self, key)
except KeyError:
return []
def setlist(self, key, list_):
dict.__setitem__(self, key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"Appends an item to the internal list associated with key"
self.setlistdefault(key, [])
dict.__setitem__(self, key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def lists(self):
"Returns a list of (key, list) pairs."
return dict.items(self)
def values(self):
"Returns a list of the last value on every key list."
return [self[key] for key in self.keys()]
def copy(self):
"Returns a copy of this object."
return self.__deepcopy__()
def update(self, *args, **kwargs):
"update() extends rather than replaces existing key lists. Also accepts keyword args."
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key, []).append(value)
class SiteNode(object):
"""Class to represent a site node (page, image or any other file)"""
def __init__(self, url):
warnings.warn(
"scrapy.utils.datatypes.SiteNode is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2
)
self.url = url
self.itemnames = []
self.children = []
self.parent = None
def add_child(self, node):
self.children.append(node)
node.parent = self
def to_string(self, level=0):
s = "%s%s\n" % (' '*level, self.url)
if self.itemnames:
for n in self.itemnames:
s += "%sScraped: %s\n" % (' '*(level+1), n)
for node in self.children:
s += node.to_string(level+1)
return s
class CaselessDict(dict):
__slots__ = ()
def __init__(self, seq=None):
super(CaselessDict, self).__init__()
if seq:
self.update(seq)
def __getitem__(self, key):
return dict.__getitem__(self, self.normkey(key))
def __setitem__(self, key, value):
dict.__setitem__(self, self.normkey(key), self.normvalue(value))
def __delitem__(self, key):
dict.__delitem__(self, self.normkey(key))
def __contains__(self, key):
return dict.__contains__(self, self.normkey(key))
has_key = __contains__
def __copy__(self):
return self.__class__(self)
copy = __copy__
def normkey(self, key):
"""Method to normalize dictionary key access"""
return key.lower()
def normvalue(self, value):
"""Method to normalize values prior to be setted"""
return value
def get(self, key, def_val=None):
return dict.get(self, self.normkey(key), self.normvalue(def_val))
def setdefault(self, key, def_val=None):
return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))
def update(self, seq):
seq = seq.items() if isinstance(seq, Mapping) else seq
iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)
super(CaselessDict, self).update(iseq)
@classmethod
def fromkeys(cls, keys, value=None):
return cls((k, value) for k in keys)
def pop(self, key, *args):
return dict.pop(self, self.normkey(key), *args)
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
if not six.PY2:
warnings.warn(
"scrapy.utils.datatypes.MergeDict is deprecated in favor "
"of collections.ChainMap (introduced in Python 3.3)",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def items(self):
item_list = []
for dict_ in self.dicts:
item_list.extend(dict_.items())
return item_list
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
class LocalCache(collections.OrderedDict):
"""Dictionary with a finite number of keys.
    Older items expire first.
"""
def __init__(self, limit=None):
super(LocalCache, self).__init__()
self.limit = limit
def __setitem__(self, key, value):
if self.limit:
while len(self) >= self.limit:
self.popitem(last=False)
super(LocalCache, self).__setitem__(key, value)
class SequenceExclude(object):
"""Object to test if an item is NOT within some sequence."""
def __init__(self, seq):
self.seq = seq
def __contains__(self, item):
return item not in self.seq
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-2-fixed/scrapy/scrapy/utils/datatypes.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-2-buggy/scrapy/scrapy/utils/datatypes.py |
scrapy-bug-25 | """
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
import six
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
return form.action or form.base_url
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
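A minimal sketch of _urlencode flattening list-valued fields into repeated query parameters (illustrative data; assumes the helpers imported above):

# Sketch: list values become repeated key=value pairs in the query string.
qs = _urlencode([('a', 'b'), ('key', ['v1', 'v2'])], 'utf-8')
assert qs == 'a=b&key=v1&key=v2'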
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
text = response.body_as_unicode()
root = create_root_node(text, lxml.html.HTMLParser, base_url=response.url)
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
    # Get form element from xpath; if not found, go up the tree
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
raise ValueError('No <form> element found with %s' % formxpath)
    # If we get here, it means that either formname was None
    # or it did not match any form
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[@type!="submit" and @type!="image" and @type!="reset"'
'and ((@type!="checkbox" and @type!="radio") or @checked)]')
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and clickable[0] is not None:
values.append(clickable)
values.extend(formdata.items())
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
        # And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
        # This is a workaround for a bug in lxml fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [el for el in form.xpath('descendant::input[@type="submit"]'
'|descendant::button[@type="submit"]'
'|descendant::button[not(@type)]')]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
"""
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
import six
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
return form.action or form.base_url
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
text = response.body_as_unicode()
root = create_root_node(text, lxml.html.HTMLParser, base_url=response.url)
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
    # Get form element from xpath; if not found, go up the tree
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
raise ValueError('No <form> element found with %s' % formxpath)
    # If we get here, it means that either formname was None
    # or it did not match any form
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[@type!="submit" and @type!="image" and @type!="reset"'
'and ((@type!="checkbox" and @type!="radio") or @checked)]')
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and clickable[0] is not None:
values.append(clickable)
values.extend(formdata.items())
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
        # And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
        # This is a workaround for a bug in lxml fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [el for el in form.xpath('descendant::input[@type="submit"]'
'|descendant::button[@type="submit"]'
'|descendant::button[not(@type)]')]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-25-fixed/scrapy/scrapy/http/request/form.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-25-buggy/scrapy/scrapy/http/request/form.py |
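A minimal sketch of FormRequest.from_response merging form inputs with caller-supplied formdata (illustrative HTML; assumes scrapy's HtmlResponse is importable):

# Sketch: inputs found in the form are combined with formdata and posted
# to the form's action URL.
from scrapy.http import HtmlResponse

html = (b'<form action="/post" method="post">'
        b'<input type="text" name="user" value="x"/></form>')
response = HtmlResponse(url='http://example.com/login', body=html)
req = FormRequest.from_response(response, formdata={'pass': 'secret'})
assert req.method == 'POST'
assert req.url == 'http://example.com/post'
assert set(req.body.split(b'&')) == {b'user=x', b'pass=secret'}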
scrapy-bug-4 | import sys
import re
from functools import wraps
from unittest import TestCase
from scrapy.http import Request
from scrapy.utils.spider import iterate_spider_output
from scrapy.utils.python import get_spec
class ContractsManager(object):
contracts = {}
def __init__(self, contracts):
for contract in contracts:
self.contracts[contract.name] = contract
def tested_methods_from_spidercls(self, spidercls):
methods = []
for key, value in vars(spidercls).items():
if (callable(value) and value.__doc__ and
re.search(r'^\s*@', value.__doc__, re.MULTILINE)):
methods.append(key)
return methods
def extract_contracts(self, method):
contracts = []
for line in method.__doc__.split('\n'):
line = line.strip()
if line.startswith('@'):
name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
args = re.split(r'\s+', args)
contracts.append(self.contracts[name](method, *args))
return contracts
def from_spider(self, spider, results):
requests = []
for method in self.tested_methods_from_spidercls(type(spider)):
bound_method = spider.__getattribute__(method)
requests.append(self.from_method(bound_method, results))
return requests
def from_method(self, method, results):
contracts = self.extract_contracts(method)
if contracts:
# calculate request args
args, kwargs = get_spec(Request.__init__)
kwargs['callback'] = method
for contract in contracts:
kwargs = contract.adjust_request_args(kwargs)
# create and prepare request
args.remove('self')
if set(args).issubset(set(kwargs)):
request = Request(**kwargs)
# execute pre and post hooks in order
for contract in reversed(contracts):
request = contract.add_pre_hook(request, results)
for contract in contracts:
request = contract.add_post_hook(request, results)
self._clean_req(request, method, results)
return request
def _clean_req(self, request, method, results):
""" stop the request from returning objects and records any errors """
cb = request.callback
@wraps(cb)
def cb_wrapper(response):
try:
output = cb(response)
output = list(iterate_spider_output(output))
except:
case = _create_testcase(method, 'callback')
results.addError(case, sys.exc_info())
def eb_wrapper(failure):
case = _create_testcase(method, 'errback')
exc_info = failure.value, failure.type, failure.getTracebackObject()
results.addError(case, exc_info)
request.callback = cb_wrapper
request.errback = eb_wrapper
class Contract(object):
""" Abstract class for contracts """
def __init__(self, method, *args):
self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)
self.args = args
def add_pre_hook(self, request, results):
if hasattr(self, 'pre_process'):
cb = request.callback
@wraps(cb)
def wrapper(response):
try:
results.startTest(self.testcase_pre)
self.pre_process(response)
results.stopTest(self.testcase_pre)
except AssertionError:
results.addFailure(self.testcase_pre, sys.exc_info())
except Exception:
results.addError(self.testcase_pre, sys.exc_info())
else:
results.addSuccess(self.testcase_pre)
finally:
return list(iterate_spider_output(cb(response)))
request.callback = wrapper
return request
def add_post_hook(self, request, results):
if hasattr(self, 'post_process'):
cb = request.callback
@wraps(cb)
def wrapper(response):
output = list(iterate_spider_output(cb(response)))
try:
results.startTest(self.testcase_post)
self.post_process(output)
results.stopTest(self.testcase_post)
except AssertionError:
results.addFailure(self.testcase_post, sys.exc_info())
except Exception:
results.addError(self.testcase_post, sys.exc_info())
else:
results.addSuccess(self.testcase_post)
finally:
return output
request.callback = wrapper
return request
def adjust_request_args(self, args):
return args
def _create_testcase(method, desc):
spider = method.__self__.name
class ContractTestCase(TestCase):
def __str__(_self):
return "[%s] %s (%s)" % (spider, method.__name__, desc)
name = '%s_%s' % (spider, method.__name__)
setattr(ContractTestCase, name, lambda x: x)
return ContractTestCase(name)
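A minimal sketch of the @-directive parsing that ContractsManager.extract_contracts applies to a callback docstring ('url' is just an illustrative contract name):

import re

# Sketch: each '@name args...' docstring line maps to a registered contract.
line = '@url http://example.com/some/page'
name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
assert name == 'url'
assert re.split(r'\s+', args) == ['http://example.com/some/page']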
import sys
import re
from functools import wraps
from unittest import TestCase
from scrapy.http import Request
from scrapy.utils.spider import iterate_spider_output
from scrapy.utils.python import get_spec
class ContractsManager(object):
contracts = {}
def __init__(self, contracts):
for contract in contracts:
self.contracts[contract.name] = contract
def tested_methods_from_spidercls(self, spidercls):
methods = []
for key, value in vars(spidercls).items():
if (callable(value) and value.__doc__ and
re.search(r'^\s*@', value.__doc__, re.MULTILINE)):
methods.append(key)
return methods
def extract_contracts(self, method):
contracts = []
for line in method.__doc__.split('\n'):
line = line.strip()
if line.startswith('@'):
name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
args = re.split(r'\s+', args)
contracts.append(self.contracts[name](method, *args))
return contracts
def from_spider(self, spider, results):
requests = []
for method in self.tested_methods_from_spidercls(type(spider)):
bound_method = spider.__getattribute__(method)
requests.append(self.from_method(bound_method, results))
return requests
def from_method(self, method, results):
contracts = self.extract_contracts(method)
if contracts:
# calculate request args
args, kwargs = get_spec(Request.__init__)
kwargs['callback'] = method
for contract in contracts:
kwargs = contract.adjust_request_args(kwargs)
# create and prepare request
args.remove('self')
if set(args).issubset(set(kwargs)):
request = Request(**kwargs)
# execute pre and post hooks in order
for contract in reversed(contracts):
request = contract.add_pre_hook(request, results)
for contract in contracts:
request = contract.add_post_hook(request, results)
self._clean_req(request, method, results)
return request
def _clean_req(self, request, method, results):
""" stop the request from returning objects and records any errors """
cb = request.callback
@wraps(cb)
def cb_wrapper(response):
try:
output = cb(response)
output = list(iterate_spider_output(output))
except:
case = _create_testcase(method, 'callback')
results.addError(case, sys.exc_info())
def eb_wrapper(failure):
case = _create_testcase(method, 'errback')
exc_info = failure.type, failure.value, failure.getTracebackObject()
results.addError(case, exc_info)
request.callback = cb_wrapper
request.errback = eb_wrapper
class Contract(object):
""" Abstract class for contracts """
def __init__(self, method, *args):
self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)
self.args = args
def add_pre_hook(self, request, results):
if hasattr(self, 'pre_process'):
cb = request.callback
@wraps(cb)
def wrapper(response):
try:
results.startTest(self.testcase_pre)
self.pre_process(response)
results.stopTest(self.testcase_pre)
except AssertionError:
results.addFailure(self.testcase_pre, sys.exc_info())
except Exception:
results.addError(self.testcase_pre, sys.exc_info())
else:
results.addSuccess(self.testcase_pre)
finally:
return list(iterate_spider_output(cb(response)))
request.callback = wrapper
return request
def add_post_hook(self, request, results):
if hasattr(self, 'post_process'):
cb = request.callback
@wraps(cb)
def wrapper(response):
output = list(iterate_spider_output(cb(response)))
try:
results.startTest(self.testcase_post)
self.post_process(output)
results.stopTest(self.testcase_post)
except AssertionError:
results.addFailure(self.testcase_post, sys.exc_info())
except Exception:
results.addError(self.testcase_post, sys.exc_info())
else:
results.addSuccess(self.testcase_post)
finally:
return output
request.callback = wrapper
return request
def adjust_request_args(self, args):
return args
def _create_testcase(method, desc):
spider = method.__self__.name
class ContractTestCase(TestCase):
def __str__(_self):
return "[%s] %s (%s)" % (spider, method.__name__, desc)
name = '%s_%s' % (spider, method.__name__)
setattr(ContractTestCase, name, lambda x: x)
return ContractTestCase(name)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-4-fixed/scrapy/scrapy/contracts/__init__.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-4-buggy/scrapy/scrapy/contracts/__init__.py |
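A minimal sketch of the exc_info convention behind eb_wrapper above: unittest result objects expect the same (type, value, traceback) ordering that sys.exc_info() returns, which is exactly where the two copies of this record differ:

import sys

# Sketch: sys.exc_info() yields (exc_type, exc_value, traceback).
try:
    raise ValueError('boom')
except ValueError:
    exc_type, exc_value, tb = sys.exc_info()
assert exc_type is ValueError
assert isinstance(exc_value, ValueError)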
scrapy-bug-38 | """
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
if formcss is not None:
from parsel.csstranslator import HTMLTranslator
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
action = form.get('action')
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
root = create_root_node(response.text, lxml.html.HTMLParser,
base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
    # Get form element from xpath; if not found, go up the tree
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
raise ValueError('No <form> element found with %s' % encoded)
    # If we get here, it means that either formname was None
    # or it did not match any form
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[not(@type) or @type['
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
' and (../@checked or'
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={
"re": "http://exslt.org/regular-expressions"})
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and clickable[0] is not None:
values.append(clickable)
values.extend((k, v) for k, v in formdata.items() if v is not None)
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
        # And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
        # This is a workaround for a bug in lxml fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [
el for el in form.xpath(
'descendant::*[(self::input or self::button)'
' and re:test(@type, "^submit$", "i")]'
'|descendant::button[not(@type)]',
namespaces={"re": "http://exslt.org/regular-expressions"})
]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
"""
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
if formcss is not None:
from parsel.csstranslator import HTMLTranslator
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
action = form.get('action')
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
root = create_root_node(response.text, lxml.html.HTMLParser,
base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
    # Get form element from xpath; if not found, go up the tree
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
raise ValueError('No <form> element found with %s' % encoded)
    # If we get here, it means that either formname was None
    # or it did not match any form
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[not(@type) or @type['
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
' and (../@checked or'
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={
"re": "http://exslt.org/regular-expressions"})
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and clickable[0] is not None:
values.append(clickable)
values.extend((k, v) for k, v in formdata.items() if v is not None)
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
        # And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
        # This is a workaround for a bug in lxml fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [
el for el in form.xpath(
'descendant::*[(self::input or self::button)'
' and re:test(@type, "^submit$", "i")]'
'|descendant::button[not(@type)]',
namespaces={"re": "http://exslt.org/regular-expressions"})
]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-38-fixed/scrapy/scrapy/http/request/form.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-38-buggy/scrapy/scrapy/http/request/form.py |
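A minimal sketch of how _get_clickable turns clickdata criteria into XPath attribute predicates (illustrative attribute values; mirrors the expression built above):

import six

# Sketch: each clickdata key/value pair becomes an [@key="value"] predicate.
clickdata = {'name': 'submit_btn'}
xpath = u'.//*' + u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
assert xpath == u'.//*[@name="submit_btn"]'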
scrapy-bug-36 | """Helper functions which don't fit anywhere else"""
import ast
import inspect
import os
import re
import hashlib
import warnings
from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
from textwrap import dedent
from w3lib.html import replace_entities
from scrapy.utils.datatypes import LocalWeakReferencedCache
from scrapy.utils.python import flatten, to_unicode
from scrapy.item import BaseItem
_ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes
def arg_to_iter(arg):
"""Convert an argument to an iterable. The argument can be a None, single
value, or an iterable.
Exception: if arg is a dict, [arg] will be returned
"""
if arg is None:
return []
elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
return arg
else:
return [arg]
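A minimal sketch of arg_to_iter's wrapping rules (illustrative values; assumes the function defined above):

# Sketch: None -> [], scalars/strings/dicts -> [arg], other iterables pass through.
assert arg_to_iter(None) == []
assert arg_to_iter('abc') == ['abc']
assert arg_to_iter({'a': 1}) == [{'a': 1}]
assert arg_to_iter([1, 2]) == [1, 2]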
def load_object(path):
"""Load an object given its absolute object path, and return it.
object can be the import path of a class, function, variable or an
instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
"""
try:
dot = path.rindex('.')
except ValueError:
raise ValueError("Error loading object '%s': not a full path" % path)
module, name = path[:dot], path[dot + 1:]
mod = import_module(module)
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
return obj
def walk_modules(path):
"""Loads a module and all its submodules from the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
For example: walk_modules('scrapy.utils')
"""
mods = []
mod = import_module(path)
mods.append(mod)
if hasattr(mod, '__path__'):
for _, subpath, ispkg in iter_modules(mod.__path__):
fullpath = path + '.' + subpath
if ispkg:
mods += walk_modules(fullpath)
else:
submod = import_module(fullpath)
mods.append(submod)
return mods
def extract_regex(regex, text, encoding='utf-8'):
"""Extract a list of unicode strings from the given text/encoding using the following policies:
    * if the regex contains a named group called "extract", that group will be returned
    * if the regex contains multiple numbered groups, all those will be returned (flattened)
    * if the regex doesn't contain any group, the entire regex match is returned
"""
if isinstance(regex, str):
regex = re.compile(regex, re.UNICODE)
try:
strings = [regex.search(text).group('extract')] # named group
except Exception:
strings = regex.findall(text) # full regex or numbered groups
strings = flatten(strings)
if isinstance(text, str):
return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
else:
return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
for s in strings]
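A minimal sketch of the extraction policies listed in the docstring (illustrative patterns; assumes the function defined above):

# Sketch: a named 'extract' group wins; otherwise numbered groups are
# returned, flattened.
assert extract_regex(r'name:\s*(?P<extract>\w+)', 'name: scrapy') == [u'scrapy']
assert extract_regex(r'(\w+)=(\w+)', 'a=1 b=2') == [u'a', u'1', u'b', u'2']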
def md5sum(file):
"""Calculate the md5 checksum of a file-like object without reading its
whole content in memory.
>>> from io import BytesIO
>>> md5sum(BytesIO(b'file content to hash'))
'784406af91dd5a54fbb9c84c2236595a'
"""
m = hashlib.md5()
while True:
d = file.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
def rel_has_nofollow(rel):
"""Return True if link rel attribute has nofollow type"""
return rel is not None and 'nofollow' in rel.split()
def create_instance(objcls, settings, crawler, *args, **kwargs):
"""Construct a class instance using its ``from_crawler`` or
``from_settings`` constructors, if available.
At least one of ``settings`` and ``crawler`` needs to be different from
    ``None``. If ``settings`` is ``None``, ``crawler.settings`` will be used.
If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
tried.
``*args`` and ``**kwargs`` are forwarded to the constructors.
Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
"""
if settings is None:
if crawler is None:
raise ValueError("Specify at least one of settings and crawler.")
settings = crawler.settings
if crawler and hasattr(objcls, 'from_crawler'):
return objcls.from_crawler(crawler, *args, **kwargs)
elif hasattr(objcls, 'from_settings'):
return objcls.from_settings(settings, *args, **kwargs)
else:
return objcls(*args, **kwargs)
@contextmanager
def set_environ(**kwargs):
"""Temporarily set environment variables inside the context manager and
fully restore previous environment afterwards
"""
original_env = {k: os.environ.get(k) for k in kwargs}
os.environ.update(kwargs)
try:
yield
finally:
for k, v in original_env.items():
if v is None:
del os.environ[k]
else:
os.environ[k] = v
_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
def is_generator_with_return_value(callable):
"""
Returns True if a callable is a generator function which includes a
'return' statement with a value different than None, False otherwise
"""
if callable in _generator_callbacks_cache:
return _generator_callbacks_cache[callable]
def returns_none(return_node):
value = return_node.value
return value is None or isinstance(value, ast.NameConstant) and value.value is None
if inspect.isgeneratorfunction(callable):
tree = ast.parse(dedent(inspect.getsource(callable)))
for node in ast.walk(tree):
if isinstance(node, ast.Return) and not returns_none(node):
_generator_callbacks_cache[callable] = True
return _generator_callbacks_cache[callable]
_generator_callbacks_cache[callable] = False
return _generator_callbacks_cache[callable]
def warn_on_generator_with_return_value(spider, callable):
"""
Logs a warning if a callable is a generator function and includes
a 'return' statement with a value different than None
"""
if is_generator_with_return_value(callable):
warnings.warn(
'The "{}.{}" method is a generator and includes a "return" statement with a '
'value different than None. This could lead to unexpected behaviour. Please see '
'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
'for details about the semantics of the "return" statement within generators'
.format(spider.__class__.__name__, callable.__name__), stacklevel=2,
)
"""Helper functions which don't fit anywhere else"""
import ast
import inspect
import os
import re
import hashlib
import warnings
from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
from textwrap import dedent
from w3lib.html import replace_entities
from scrapy.utils.datatypes import LocalWeakReferencedCache
from scrapy.utils.python import flatten, to_unicode
from scrapy.item import BaseItem
_ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes
def arg_to_iter(arg):
"""Convert an argument to an iterable. The argument can be a None, single
value, or an iterable.
Exception: if arg is a dict, [arg] will be returned
"""
if arg is None:
return []
elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
return arg
else:
return [arg]
def load_object(path):
"""Load an object given its absolute object path, and return it.
object can be the import path of a class, function, variable or an
instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
"""
try:
dot = path.rindex('.')
except ValueError:
raise ValueError("Error loading object '%s': not a full path" % path)
module, name = path[:dot], path[dot + 1:]
mod = import_module(module)
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
return obj
def walk_modules(path):
"""Loads a module and all its submodules from the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
For example: walk_modules('scrapy.utils')
"""
mods = []
mod = import_module(path)
mods.append(mod)
if hasattr(mod, '__path__'):
for _, subpath, ispkg in iter_modules(mod.__path__):
fullpath = path + '.' + subpath
if ispkg:
mods += walk_modules(fullpath)
else:
submod = import_module(fullpath)
mods.append(submod)
return mods
def extract_regex(regex, text, encoding='utf-8'):
"""Extract a list of unicode strings from the given text/encoding using the following policies:
    * if the regex contains a named group called "extract", that group will be returned
    * if the regex contains multiple numbered groups, all those will be returned (flattened)
    * if the regex doesn't contain any group, the entire regex match is returned
"""
if isinstance(regex, str):
regex = re.compile(regex, re.UNICODE)
try:
strings = [regex.search(text).group('extract')] # named group
except Exception:
strings = regex.findall(text) # full regex or numbered groups
strings = flatten(strings)
if isinstance(text, str):
return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
else:
return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
for s in strings]
def md5sum(file):
"""Calculate the md5 checksum of a file-like object without reading its
whole content in memory.
>>> from io import BytesIO
>>> md5sum(BytesIO(b'file content to hash'))
'784406af91dd5a54fbb9c84c2236595a'
"""
m = hashlib.md5()
while True:
d = file.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
def rel_has_nofollow(rel):
"""Return True if link rel attribute has nofollow type"""
return rel is not None and 'nofollow' in rel.split()
def create_instance(objcls, settings, crawler, *args, **kwargs):
"""Construct a class instance using its ``from_crawler`` or
``from_settings`` constructors, if available.
At least one of ``settings`` and ``crawler`` needs to be different from
    ``None``. If ``settings`` is ``None``, ``crawler.settings`` will be used.
If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
tried.
``*args`` and ``**kwargs`` are forwarded to the constructors.
Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an
extension has not been implemented correctly).
"""
if settings is None:
if crawler is None:
raise ValueError("Specify at least one of settings and crawler.")
settings = crawler.settings
if crawler and hasattr(objcls, 'from_crawler'):
instance = objcls.from_crawler(crawler, *args, **kwargs)
method_name = 'from_crawler'
elif hasattr(objcls, 'from_settings'):
instance = objcls.from_settings(settings, *args, **kwargs)
method_name = 'from_settings'
else:
instance = objcls(*args, **kwargs)
method_name = '__new__'
if instance is None:
raise TypeError("%s.%s returned None" % (objcls.__qualname__, method_name))
return instance
@contextmanager
def set_environ(**kwargs):
"""Temporarily set environment variables inside the context manager and
fully restore previous environment afterwards
"""
original_env = {k: os.environ.get(k) for k in kwargs}
os.environ.update(kwargs)
try:
yield
finally:
for k, v in original_env.items():
if v is None:
del os.environ[k]
else:
os.environ[k] = v
_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
def is_generator_with_return_value(callable):
"""
Returns True if a callable is a generator function which includes a
'return' statement with a value different than None, False otherwise
"""
if callable in _generator_callbacks_cache:
return _generator_callbacks_cache[callable]
def returns_none(return_node):
value = return_node.value
return value is None or isinstance(value, ast.NameConstant) and value.value is None
if inspect.isgeneratorfunction(callable):
tree = ast.parse(dedent(inspect.getsource(callable)))
for node in ast.walk(tree):
if isinstance(node, ast.Return) and not returns_none(node):
_generator_callbacks_cache[callable] = True
return _generator_callbacks_cache[callable]
_generator_callbacks_cache[callable] = False
return _generator_callbacks_cache[callable]
def warn_on_generator_with_return_value(spider, callable):
"""
Logs a warning if a callable is a generator function and includes
a 'return' statement with a value different than None
"""
if is_generator_with_return_value(callable):
warnings.warn(
'The "{}.{}" method is a generator and includes a "return" statement with a '
'value different than None. This could lead to unexpected behaviour. Please see '
'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
'for details about the semantics of the "return" statement within generators'
.format(spider.__class__.__name__, callable.__name__), stacklevel=2,
)
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-36-fixed/scrapy/scrapy/utils/misc.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-36-buggy/scrapy/scrapy/utils/misc.py |
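A minimal sketch of create_instance's constructor resolution order (DummyPipeline is hypothetical, not part of scrapy):

# Sketch: from_crawler is preferred, then from_settings, then the plain
# constructor; a plain dict stands in for a Settings object here.
class DummyPipeline(object):
    def __init__(self, tag=None):
        self.tag = tag

    @classmethod
    def from_settings(cls, settings, tag=None):
        return cls(tag=tag)

obj = create_instance(DummyPipeline, settings={}, crawler=None, tag='x')
assert isinstance(obj, DummyPipeline) and obj.tag == 'x'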
scrapy-bug-30 | from __future__ import print_function
import sys
import optparse
import cProfile
import inspect
import pkg_resources
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.xlib import lsprofcalltree
from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.misc import walk_modules
from scrapy.utils.project import inside_project, get_project_settings
from scrapy.settings.deprecated import check_deprecated_settings
def _iter_command_classes(module_name):
    # TODO: add `name` attribute to commands and merge this function with
# scrapy.utils.spider.iter_spider_classes
for module in walk_modules(module_name):
for obj in vars(module).itervalues():
if inspect.isclass(obj) and \
issubclass(obj, ScrapyCommand) and \
obj.__module__ == module.__name__:
yield obj
def _get_commands_from_module(module, inproject):
d = {}
for cmd in _iter_command_classes(module):
if inproject or not cmd.requires_project:
cmdname = cmd.__module__.split('.')[-1]
d[cmdname] = cmd()
return d
def _get_commands_from_entry_points(inproject, group='scrapy.commands'):
cmds = {}
for entry_point in pkg_resources.iter_entry_points(group):
obj = entry_point.load()
if inspect.isclass(obj):
cmds[entry_point.name] = obj()
else:
raise Exception("Invalid entry point %s" % entry_point.name)
return cmds
def _get_commands_dict(settings, inproject):
cmds = _get_commands_from_module('scrapy.commands', inproject)
cmds.update(_get_commands_from_entry_points(inproject))
cmds_module = settings['COMMANDS_MODULE']
if cmds_module:
cmds.update(_get_commands_from_module(cmds_module, inproject))
return cmds
def _pop_command_name(argv):
i = 0
for arg in argv[1:]:
if not arg.startswith('-'):
del argv[i]
return arg
i += 1
def _print_header(settings, inproject):
if inproject:
print("Scrapy %s - project: %s\n" % (scrapy.__version__, \
settings['BOT_NAME']))
else:
print("Scrapy %s - no active project\n" % scrapy.__version__)
def _print_commands(settings, inproject):
_print_header(settings, inproject)
print("Usage:")
print(" scrapy <command> [options] [args]\n")
print("Available commands:")
cmds = _get_commands_dict(settings, inproject)
for cmdname, cmdclass in sorted(cmds.items()):
print(" %-13s %s" % (cmdname, cmdclass.short_desc()))
if not inproject:
print()
print(" [ more ] More commands available when run from project directory")
print()
print('Use "scrapy <command> -h" to see more info about a command')
def _print_unknown_command(settings, cmdname, inproject):
_print_header(settings, inproject)
print("Unknown command: %s\n" % cmdname)
print('Use "scrapy" to see available commands')
def _run_print_help(parser, func, *a, **kw):
try:
func(*a, **kw)
except UsageError as e:
if str(e):
parser.error(str(e))
if e.print_help:
parser.print_help()
sys.exit(2)
def execute(argv=None, settings=None):
if argv is None:
argv = sys.argv
# --- backwards compatibility for scrapy.conf.settings singleton ---
if settings is None and 'scrapy.conf' in sys.modules:
from scrapy import conf
if hasattr(conf, 'settings'):
settings = conf.settings
# ------------------------------------------------------------------
if settings is None:
settings = get_project_settings()
check_deprecated_settings(settings)
# --- backwards compatibility for scrapy.conf.settings singleton ---
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
from scrapy import conf
conf.settings = settings
# ------------------------------------------------------------------
inproject = inside_project()
cmds = _get_commands_dict(settings, inproject)
cmdname = _pop_command_name(argv)
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), \
conflict_handler='resolve')
if not cmdname:
_print_commands(settings, inproject)
sys.exit(0)
elif cmdname not in cmds:
_print_unknown_command(settings, cmdname, inproject)
sys.exit(2)
cmd = cmds[cmdname]
parser.usage = "scrapy %s %s" % (cmdname, cmd.syntax())
parser.description = cmd.long_desc()
settings.setdict(cmd.default_settings, priority='command')
cmd.settings = settings
cmd.add_options(parser)
opts, args = parser.parse_args(args=argv[1:])
_run_print_help(parser, cmd.process_options, args, opts)
cmd.crawler_process = CrawlerProcess(settings)
_run_print_help(parser, _run_command, cmd, args, opts)
sys.exit(cmd.exitcode)
def _run_command(cmd, args, opts):
if opts.profile or opts.lsprof:
_run_command_profiled(cmd, args, opts)
else:
cmd.run(args, opts)
def _run_command_profiled(cmd, args, opts):
if opts.profile:
sys.stderr.write("scrapy: writing cProfile stats to %r\n" % opts.profile)
if opts.lsprof:
sys.stderr.write("scrapy: writing lsprof stats to %r\n" % opts.lsprof)
loc = locals()
p = cProfile.Profile()
p.runctx('cmd.run(args, opts)', globals(), loc)
if opts.profile:
p.dump_stats(opts.profile)
k = lsprofcalltree.KCacheGrind(p)
if opts.lsprof:
with open(opts.lsprof, 'w') as f:
k.output(f)
if __name__ == '__main__':
execute()
from __future__ import print_function
import sys
import optparse
import cProfile
import inspect
import pkg_resources
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.xlib import lsprofcalltree
from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.misc import walk_modules
from scrapy.utils.project import inside_project, get_project_settings
from scrapy.settings.deprecated import check_deprecated_settings
def _iter_command_classes(module_name):
    # TODO: add `name` attribute to commands and merge this function with
# scrapy.utils.spider.iter_spider_classes
for module in walk_modules(module_name):
for obj in vars(module).values():
if inspect.isclass(obj) and \
issubclass(obj, ScrapyCommand) and \
obj.__module__ == module.__name__:
yield obj
def _get_commands_from_module(module, inproject):
d = {}
for cmd in _iter_command_classes(module):
if inproject or not cmd.requires_project:
cmdname = cmd.__module__.split('.')[-1]
d[cmdname] = cmd()
return d
def _get_commands_from_entry_points(inproject, group='scrapy.commands'):
cmds = {}
for entry_point in pkg_resources.iter_entry_points(group):
obj = entry_point.load()
if inspect.isclass(obj):
cmds[entry_point.name] = obj()
else:
raise Exception("Invalid entry point %s" % entry_point.name)
return cmds
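
# Hedged sketch: third-party packages can contribute commands through the
# 'scrapy.commands' entry-point group consumed above. The package, module and
# command names below are hypothetical.
#
#     # setup.py
#     setup(
#         name='mypkg',
#         entry_points={
#             'scrapy.commands': ['hello = mypkg.commands.hello:HelloCommand'],
#         },
#     )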
def _get_commands_dict(settings, inproject):
cmds = _get_commands_from_module('scrapy.commands', inproject)
cmds.update(_get_commands_from_entry_points(inproject))
cmds_module = settings['COMMANDS_MODULE']
if cmds_module:
cmds.update(_get_commands_from_module(cmds_module, inproject))
return cmds
def _pop_command_name(argv):
i = 0
for arg in argv[1:]:
if not arg.startswith('-'):
del argv[i]
return arg
i += 1
def _print_header(settings, inproject):
if inproject:
print("Scrapy %s - project: %s\n" % (scrapy.__version__, \
settings['BOT_NAME']))
else:
print("Scrapy %s - no active project\n" % scrapy.__version__)
def _print_commands(settings, inproject):
_print_header(settings, inproject)
print("Usage:")
print(" scrapy <command> [options] [args]\n")
print("Available commands:")
cmds = _get_commands_dict(settings, inproject)
for cmdname, cmdclass in sorted(cmds.items()):
print(" %-13s %s" % (cmdname, cmdclass.short_desc()))
if not inproject:
print()
print(" [ more ] More commands available when run from project directory")
print()
print('Use "scrapy <command> -h" to see more info about a command')
def _print_unknown_command(settings, cmdname, inproject):
_print_header(settings, inproject)
print("Unknown command: %s\n" % cmdname)
print('Use "scrapy" to see available commands')
def _run_print_help(parser, func, *a, **kw):
try:
func(*a, **kw)
except UsageError as e:
if str(e):
parser.error(str(e))
if e.print_help:
parser.print_help()
sys.exit(2)
def execute(argv=None, settings=None):
if argv is None:
argv = sys.argv
# --- backwards compatibility for scrapy.conf.settings singleton ---
if settings is None and 'scrapy.conf' in sys.modules:
from scrapy import conf
if hasattr(conf, 'settings'):
settings = conf.settings
# ------------------------------------------------------------------
if settings is None:
settings = get_project_settings()
check_deprecated_settings(settings)
# --- backwards compatibility for scrapy.conf.settings singleton ---
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
from scrapy import conf
conf.settings = settings
# ------------------------------------------------------------------
inproject = inside_project()
cmds = _get_commands_dict(settings, inproject)
cmdname = _pop_command_name(argv)
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), \
conflict_handler='resolve')
if not cmdname:
_print_commands(settings, inproject)
sys.exit(0)
elif cmdname not in cmds:
_print_unknown_command(settings, cmdname, inproject)
sys.exit(2)
cmd = cmds[cmdname]
parser.usage = "scrapy %s %s" % (cmdname, cmd.syntax())
parser.description = cmd.long_desc()
settings.setdict(cmd.default_settings, priority='command')
cmd.settings = settings
cmd.add_options(parser)
opts, args = parser.parse_args(args=argv[1:])
_run_print_help(parser, cmd.process_options, args, opts)
cmd.crawler_process = CrawlerProcess(settings)
_run_print_help(parser, _run_command, cmd, args, opts)
sys.exit(cmd.exitcode)
def _run_command(cmd, args, opts):
if opts.profile or opts.lsprof:
_run_command_profiled(cmd, args, opts)
else:
cmd.run(args, opts)
def _run_command_profiled(cmd, args, opts):
if opts.profile:
sys.stderr.write("scrapy: writing cProfile stats to %r\n" % opts.profile)
if opts.lsprof:
sys.stderr.write("scrapy: writing lsprof stats to %r\n" % opts.lsprof)
loc = locals()
p = cProfile.Profile()
p.runctx('cmd.run(args, opts)', globals(), loc)
if opts.profile:
p.dump_stats(opts.profile)
k = lsprofcalltree.KCacheGrind(p)
if opts.lsprof:
with open(opts.lsprof, 'w') as f:
k.output(f)
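
# Hedged sketch: the profiling branch above is normally reached via global
# command-line options; the flag names below are assumed from ScrapyCommand's
# default option set in this era of the code base.
#
#     scrapy crawl myspider --profile=stats.cprof
#     scrapy crawl myspider --lsprof=stats.kgrind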
if __name__ == '__main__':
execute()
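
# Hedged usage sketch: execute() can also be driven programmatically; the
# first element of argv is treated as the program name and the call ends in
# sys.exit(), e.g.:
#
#     execute(['scrapy', 'version'])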
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-30-fixed/scrapy/scrapy/cmdline.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-30-buggy/scrapy/scrapy/cmdline.py |
scrapy-bug-8 | """
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
"""Base class for all scraped items."""
pass
class Field(dict):
"""Container of field metadata"""
class ItemMeta(ABCMeta):
def __new__(mcs, class_name, bases, attrs):
new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
_class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)
fields = getattr(_class, 'fields', {})
new_attrs = {}
for n in dir(_class):
v = getattr(_class, n)
if isinstance(v, Field):
fields[n] = v
elif n in attrs:
new_attrs[n] = attrs[n]
new_attrs['fields'] = fields
new_attrs['_class'] = _class
return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
fields = {}
def __init__(self, *args, **kwargs):
self._values = {}
if args or kwargs: # avoid creating dict for most common case
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if key in self.fields:
self._values[key] = value
else:
raise KeyError("%s does not support field: %s" %
(self.__class__.__name__, key))
def __delitem__(self, key):
del self._values[key]
def __getattr__(self, name):
if name in self.fields:
raise AttributeError("Use item[%r] to get field value" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_'):
raise AttributeError("Use item[%r] = %r to set field value" %
(name, value))
super(DictItem, self).__setattr__(name, value)
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(self._values)
__hash__ = BaseItem.__hash__
def keys(self):
return self._values.keys()
def __repr__(self):
return pformat(dict(self))
def copy(self):
return self.__class__(self)
@six.add_metaclass(ItemMeta)
class Item(DictItem):
pass
"""
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
"""Base class for all scraped items."""
pass
class Field(dict):
"""Container of field metadata"""
class ItemMeta(ABCMeta):
def __new__(mcs, class_name, bases, attrs):
classcell = attrs.pop('__classcell__', None)
new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
_class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)
fields = getattr(_class, 'fields', {})
new_attrs = {}
for n in dir(_class):
v = getattr(_class, n)
if isinstance(v, Field):
fields[n] = v
elif n in attrs:
new_attrs[n] = attrs[n]
new_attrs['fields'] = fields
new_attrs['_class'] = _class
if classcell is not None:
new_attrs['__classcell__'] = classcell
return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
fields = {}
def __init__(self, *args, **kwargs):
self._values = {}
if args or kwargs: # avoid creating dict for most common case
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if key in self.fields:
self._values[key] = value
else:
raise KeyError("%s does not support field: %s" %
(self.__class__.__name__, key))
def __delitem__(self, key):
del self._values[key]
def __getattr__(self, name):
if name in self.fields:
raise AttributeError("Use item[%r] to get field value" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_'):
raise AttributeError("Use item[%r] = %r to set field value" %
(name, value))
super(DictItem, self).__setattr__(name, value)
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(self._values)
__hash__ = BaseItem.__hash__
def keys(self):
return self._values.keys()
def __repr__(self):
return pformat(dict(self))
def copy(self):
return self.__class__(self)
@six.add_metaclass(ItemMeta)
class Item(DictItem):
pass
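
# Hedged sketch (Python 3.6+): a zero-argument super() in a subclass body
# creates an implicit __classcell__, which the metaclass above now forwards
# to type.__new__, so a class like this can be defined without error. The
# item and field names are hypothetical.
class _DemoItem(Item):
    name = Field()

    def copy(self):
        return super().copy()  # relies on the propagated __classcell__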
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-8-fixed/scrapy/scrapy/item.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-8-buggy/scrapy/scrapy/item.py |
scrapy-bug-27 | import logging
from six.moves.urllib.parse import urljoin
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
{'reason': reason, 'redirected': redirected, 'request': request},
extra={'spider': spider})
return redirected
else:
logger.debug("Discarding %(request)s: max redirections reached",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest("max redirections reached")
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
"""Handle redirection of requests based on response status and meta-refresh html tag"""
def process_response(self, request, response, spider):
if (request.meta.get('dont_redirect', False) or
response.status in getattr(spider, 'handle_httpstatus_list', [])):
return response
if request.method == 'HEAD':
if response.status in [301, 302, 303, 307] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
else:
return response
if response.status in [302, 303] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
if response.status in [301, 307] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
return response
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
if isinstance(response, HtmlResponse):
interval, url = get_meta_refresh(response)
if url and interval < self._maxdelay:
redirected = self._redirect_request_using_get(request, url)
return self._redirect(redirected, request, spider, 'meta refresh')
return response
import logging
from six.moves.urllib.parse import urljoin
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
{'reason': reason, 'redirected': redirected, 'request': request},
extra={'spider': spider})
return redirected
else:
logger.debug("Discarding %(request)s: max redirections reached",
{'request': request}, extra={'spider': spider})
raise IgnoreRequest("max redirections reached")
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
"""Handle redirection of requests based on response status and meta-refresh html tag"""
def process_response(self, request, response, spider):
if (request.meta.get('dont_redirect', False) or
response.status in getattr(spider, 'handle_httpstatus_list', []) or
response.status in request.meta.get('handle_httpstatus_list', []) or
request.meta.get('handle_httpstatus_all', False)):
return response
if request.method == 'HEAD':
if response.status in [301, 302, 303, 307] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
else:
return response
if response.status in [302, 303] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
if response.status in [301, 307] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
return response
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
if isinstance(response, HtmlResponse):
interval, url = get_meta_refresh(response)
if url and interval < self._maxdelay:
redirected = self._redirect_request_using_get(request, url)
return self._redirect(redirected, request, spider, 'meta refresh')
return response
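
# Hedged usage sketch: with the extended guard in process_response above, a
# single request can opt out of redirect handling so its callback receives
# the 3xx response itself. The URL is hypothetical.
#
#     Request('http://example.com/old-page',
#             meta={'handle_httpstatus_list': [301]})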
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-27-fixed/scrapy/scrapy/downloadermiddlewares/redirect.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-27-buggy/scrapy/scrapy/downloadermiddlewares/redirect.py |
scrapy-bug-40 | """
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import sys
import pprint
import marshal
import six
from six.moves import cPickle as pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, to_native_str, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses constructors)
"""
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.encoding = options.pop('encoding', 'utf-8')
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = six.iterkeys(item.fields)
else:
field_iter = six.iterkeys(item)
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
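
# Hedged usage sketch: fields_to_export controls both which fields are
# emitted and their order, e.g. (file object and field names hypothetical):
#
#     JsonLinesItemExporter(fp, fields_to_export=['name', 'price'])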
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict) + '\n'))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
self.first_item = True
def start_exporting(self):
self.file.write(b"[")
def finish_exporting(self):
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',\n')
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict)))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
self.xg = XMLGenerator(file, encoding=self.encoding)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
def export_item(self, item):
self.xg.startElement(self.item_element, {})
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value)
self.xg.endElement(self.item_element)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value):
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
for subname, value in serialized_value.items():
self._export_xml_field(subname, value)
elif is_listlike(serialized_value):
for value in serialized_value:
self._export_xml_field('value', value)
else:
self._xg_characters(serialized_value)
self.xg.endElement(name)
# Workaround for http://bugs.python.org/issue17606
# Before Python 2.7.4 xml.sax.saxutils required bytes;
# since 2.7.4 it requires unicode. The bug is likely to be
# fixed in 2.7.6, but 2.7.6 will still support unicode,
# and Python 3.x will require unicode, so ">= 2.7.4" should be fine.
if sys.version_info[:3] >= (2, 7, 4):
def _xg_characters(self, serialized_value):
if not isinstance(serialized_value, six.text_type):
serialized_value = serialized_value.decode(self.encoding)
return self.xg.characters(serialized_value)
else: # pragma: no cover
def _xg_characters(self, serialized_value):
return self.xg.characters(serialized_value)
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
self._configure(kwargs, dont_fail=True)
self.include_headers_line = include_headers_line
file = file if six.PY2 else io.TextIOWrapper(file, line_buffering=True)
self.csv_writer = csv.writer(file, **kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_native_str(s)
except TypeError:
yield to_native_str(repr(s))
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
self._configure(kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""The idea behind this exporter is to have a mechanism to serialize items
to built-in python types so any serialization library (like
json, msgpack, binc, etc) can be used on top of it. Its main goal is to
    seamlessly support what BaseItemExporter does plus nested items.
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
if self.binary:
return to_bytes(value, encoding=self.encoding)
else:
return to_unicode(value, encoding=self.encoding)
def _serialize_dict(self, value):
for key, val in six.iteritems(value):
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
"""
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import sys
import pprint
import marshal
import six
from six.moves import cPickle as pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, to_native_str, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses constructors)
"""
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.encoding = options.pop('encoding', 'utf-8')
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = six.iterkeys(item.fields)
else:
field_iter = six.iterkeys(item)
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict) + '\n'))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
self.first_item = True
def start_exporting(self):
self.file.write(b"[")
def finish_exporting(self):
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',\n')
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict)))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
self.xg = XMLGenerator(file, encoding=self.encoding)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
def export_item(self, item):
self.xg.startElement(self.item_element, {})
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value)
self.xg.endElement(self.item_element)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value):
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
for subname, value in serialized_value.items():
self._export_xml_field(subname, value)
elif is_listlike(serialized_value):
for value in serialized_value:
self._export_xml_field('value', value)
else:
self._xg_characters(serialized_value)
self.xg.endElement(name)
# Workaround for http://bugs.python.org/issue17606
# Before Python 2.7.4 xml.sax.saxutils required bytes;
# since 2.7.4 it requires unicode. The bug is likely to be
# fixed in 2.7.6, but 2.7.6 will still support unicode,
# and Python 3.x will require unicode, so ">= 2.7.4" should be fine.
if sys.version_info[:3] >= (2, 7, 4):
def _xg_characters(self, serialized_value):
if not isinstance(serialized_value, six.text_type):
serialized_value = serialized_value.decode(self.encoding)
return self.xg.characters(serialized_value)
else: # pragma: no cover
def _xg_characters(self, serialized_value):
return self.xg.characters(serialized_value)
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
self._configure(kwargs, dont_fail=True)
self.include_headers_line = include_headers_line
file = file if six.PY2 else io.TextIOWrapper(file, line_buffering=True)
self.csv_writer = csv.writer(file, **kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_native_str(s)
except TypeError:
yield to_native_str(repr(s))
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
self._configure(kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""The idea behind this exporter is to have a mechanism to serialize items
to built-in python types so any serialization library (like
json, msgpack, binc, etc) can be used on top of it. Its main goal is to
    seamlessly support what BaseItemExporter does plus nested items.
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (six.text_type, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in six.iteritems(value):
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
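
# Hedged sketch: the rewritten _serialize_value above encodes only text and
# bytes, so other scalar types now pass through untouched.
_demo_exporter = PythonItemExporter(binary=False)
assert _demo_exporter.export_item({'name': u'John', 'age': 22}) == \
    {'name': u'John', 'age': 22}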
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-40-fixed/scrapy/scrapy/exporters.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-40-buggy/scrapy/scrapy/exporters.py |
scrapy-bug-22 | """
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import sys
import pprint
import marshal
import six
from six.moves import cPickle as pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, to_native_str, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses constructors)
"""
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.encoding = options.pop('encoding', 'utf-8')
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = six.iterkeys(item.fields)
else:
field_iter = six.iterkeys(item)
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict) + '\n'))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
self.first_item = True
def start_exporting(self):
self.file.write(b"[")
def finish_exporting(self):
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',\n')
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict)))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
self.xg = XMLGenerator(file, encoding=self.encoding)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
def export_item(self, item):
self.xg.startElement(self.item_element, {})
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value)
self.xg.endElement(self.item_element)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value):
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
for subname, value in serialized_value.items():
self._export_xml_field(subname, value)
elif is_listlike(serialized_value):
for value in serialized_value:
self._export_xml_field('value', value)
else:
self._xg_characters(serialized_value)
self.xg.endElement(name)
# Workaround for http://bugs.python.org/issue17606
# Before Python 2.7.4 xml.sax.saxutils required bytes;
# since 2.7.4 it requires unicode. The bug is likely to be
# fixed in 2.7.6, but 2.7.6 will still support unicode,
# and Python 3.x will require unicode, so ">= 2.7.4" should be fine.
if sys.version_info[:3] >= (2, 7, 4):
def _xg_characters(self, serialized_value):
if not isinstance(serialized_value, six.text_type):
serialized_value = serialized_value.decode(self.encoding)
return self.xg.characters(serialized_value)
else: # pragma: no cover
def _xg_characters(self, serialized_value):
return self.xg.characters(serialized_value)
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
self._configure(kwargs, dont_fail=True)
self.include_headers_line = include_headers_line
file = file if six.PY2 else io.TextIOWrapper(file, line_buffering=True)
self.csv_writer = csv.writer(file, **kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_native_str(s)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
self._configure(kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""The idea behind this exporter is to have a mechanism to serialize items
to built-in python types so any serialization library (like
json, msgpack, binc, etc) can be used on top of it. Its main goal is to
    seamlessly support what BaseItemExporter does plus nested items.
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (six.text_type, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in six.iteritems(value):
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
"""
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import sys
import pprint
import marshal
import six
from six.moves import cPickle as pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, to_native_str, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses constructors)
"""
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.encoding = options.pop('encoding', 'utf-8')
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = six.iterkeys(item.fields)
else:
field_iter = six.iterkeys(item)
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict) + '\n'))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
self.encoder = ScrapyJSONEncoder(**kwargs)
self.first_item = True
def start_exporting(self):
self.file.write(b"[")
def finish_exporting(self):
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',\n')
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(self.encoder.encode(itemdict)))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
self.xg = XMLGenerator(file, encoding=self.encoding)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
def export_item(self, item):
self.xg.startElement(self.item_element, {})
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value)
self.xg.endElement(self.item_element)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value):
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
for subname, value in serialized_value.items():
self._export_xml_field(subname, value)
elif is_listlike(serialized_value):
for value in serialized_value:
self._export_xml_field('value', value)
elif isinstance(serialized_value, six.text_type):
self._xg_characters(serialized_value)
else:
self._xg_characters(str(serialized_value))
self.xg.endElement(name)
# Workaround for http://bugs.python.org/issue17606
# Before Python 2.7.4 xml.sax.saxutils required bytes;
# since 2.7.4 it requires unicode. The bug is likely to be
# fixed in 2.7.6, but 2.7.6 will still support unicode,
# and Python 3.x will require unicode, so ">= 2.7.4" should be fine.
if sys.version_info[:3] >= (2, 7, 4):
def _xg_characters(self, serialized_value):
if not isinstance(serialized_value, six.text_type):
serialized_value = serialized_value.decode(self.encoding)
return self.xg.characters(serialized_value)
else: # pragma: no cover
def _xg_characters(self, serialized_value):
return self.xg.characters(serialized_value)
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
self._configure(kwargs, dont_fail=True)
self.include_headers_line = include_headers_line
file = file if six.PY2 else io.TextIOWrapper(file, line_buffering=True)
self.csv_writer = csv.writer(file, **kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_native_str(s)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
self._configure(kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""The idea behind this exporter is to have a mechanism to serialize items
to built-in python types so any serialization library (like
json, msgpack, binc, etc) can be used on top of it. Its main goal is to
    seamlessly support what BaseItemExporter does plus nested items.
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (six.text_type, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in six.iteritems(value):
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
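
# Hedged sketch: _export_xml_field above now stringifies non-text scalars, so
# an item carrying a number serializes instead of failing inside XMLGenerator.
_demo_buffer = io.BytesIO()
_demo_xml = XmlItemExporter(_demo_buffer)
_demo_xml.start_exporting()
_demo_xml.export_item({'age': 22})  # -> <item><age>22</age></item>
_demo_xml.finish_exporting()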
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-22-fixed/scrapy/scrapy/exporters.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-22-buggy/scrapy/scrapy/exporters.py |
scrapy-bug-34 | """
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
"""Base class for all scraped items."""
pass
class Field(dict):
"""Container of field metadata"""
class ItemMeta(ABCMeta):
def __new__(mcs, class_name, bases, attrs):
new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
_class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)
fields = {}
new_attrs = {}
for n in dir(_class):
v = getattr(_class, n)
if isinstance(v, Field):
fields[n] = v
elif n in attrs:
new_attrs[n] = attrs[n]
new_attrs['fields'] = fields
new_attrs['_class'] = _class
return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
fields = {}
def __init__(self, *args, **kwargs):
self._values = {}
if args or kwargs: # avoid creating dict for most common case
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if key in self.fields:
self._values[key] = value
else:
raise KeyError("%s does not support field: %s" %
(self.__class__.__name__, key))
def __delitem__(self, key):
del self._values[key]
def __getattr__(self, name):
if name in self.fields:
raise AttributeError("Use item[%r] to get field value" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_'):
raise AttributeError("Use item[%r] = %r to set field value" %
(name, value))
super(DictItem, self).__setattr__(name, value)
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(self._values)
__hash__ = BaseItem.__hash__
def keys(self):
return self._values.keys()
def __repr__(self):
return pformat(dict(self))
def copy(self):
return self.__class__(self)
@six.add_metaclass(ItemMeta)
class Item(DictItem):
pass
"""
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
"""Base class for all scraped items."""
pass
class Field(dict):
"""Container of field metadata"""
class ItemMeta(ABCMeta):
def __new__(mcs, class_name, bases, attrs):
new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
_class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)
fields = getattr(_class, 'fields', {})
new_attrs = {}
for n in dir(_class):
v = getattr(_class, n)
if isinstance(v, Field):
fields[n] = v
elif n in attrs:
new_attrs[n] = attrs[n]
new_attrs['fields'] = fields
new_attrs['_class'] = _class
return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
fields = {}
def __init__(self, *args, **kwargs):
self._values = {}
if args or kwargs: # avoid creating dict for most common case
for k, v in six.iteritems(dict(*args, **kwargs)):
self[k] = v
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if key in self.fields:
self._values[key] = value
else:
raise KeyError("%s does not support field: %s" %
(self.__class__.__name__, key))
def __delitem__(self, key):
del self._values[key]
def __getattr__(self, name):
if name in self.fields:
raise AttributeError("Use item[%r] to get field value" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_'):
raise AttributeError("Use item[%r] = %r to set field value" %
(name, value))
super(DictItem, self).__setattr__(name, value)
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(self._values)
__hash__ = BaseItem.__hash__
def keys(self):
return self._values.keys()
def __repr__(self):
return pformat(dict(self))
def copy(self):
return self.__class__(self)
@six.add_metaclass(ItemMeta)
class Item(DictItem):
pass
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-34-fixed/scrapy/scrapy/item.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-34-buggy/scrapy/scrapy/item.py |
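Illustrative sketch (not part of the entries above): the two item.py copies differ only in how ItemMeta seeds `fields` (`fields = {}` versus `fields = getattr(_class, 'fields', {})`). The getattr variant preserves a `fields` dict declared directly in the class body; the plain `{}` variant silently discards it.
from scrapy.item import Item, Field
class Declared(Item):
    fields = {'a': Field()}
d = Declared()
d['a'] = 'x'  # works when ItemMeta starts from the declared dict;
              # with fields = {}, Declared.fields ends up empty and this
              # raises KeyError: 'Declared does not support field: a'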
scrapy-bug-29 | """
This module provides some useful functions for working with
scrapy.http.Request objects
"""
from __future__ import print_function
import hashlib
import weakref
from six.moves.urllib.parse import urlunparse
from twisted.internet.defer import Deferred
from w3lib.http import basic_auth_header
from scrapy.utils.python import to_bytes, to_native_str
from scrapy.utils.url import canonicalize_url
from scrapy.utils.httpobj import urlparse_cached
_fingerprint_cache = weakref.WeakKeyDictionary()
def request_fingerprint(request, include_headers=None):
"""
Return the request fingerprint.
The request fingerprint is a hash that uniquely identifies the resource the
request points to. For example, take the following two urls:
http://www.example.com/query?id=111&cat=222
http://www.example.com/query?cat=222&id=111
    Even though those are two different URLs, both point to the same resource
    and are equivalent (i.e. they should return the same response).
    Another example is cookies used to store session ids. Suppose the
    following page is only accessible to authenticated users:
    http://www.example.com/members/offers.html
    Lots of sites use a cookie to store the session id, which adds a random
component to the HTTP Request and thus should be ignored when calculating
the fingerprint.
For this reason, request headers are ignored by default when calculating
    the fingerprint. If you want to include specific headers use the
include_headers argument, which is a list of Request headers to include.
"""
if include_headers:
include_headers = tuple([to_bytes(h.lower())
for h in sorted(include_headers)])
cache = _fingerprint_cache.setdefault(request, {})
if include_headers not in cache:
fp = hashlib.sha1()
fp.update(to_bytes(request.method))
fp.update(to_bytes(canonicalize_url(request.url)))
fp.update(request.body or b'')
if include_headers:
for hdr in include_headers:
if hdr in request.headers:
fp.update(hdr)
for v in request.headers.getlist(hdr):
fp.update(v)
cache[include_headers] = fp.hexdigest()
return cache[include_headers]
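# Illustration (not part of the original file): canonicalize_url sorts query
# arguments, so the two equivalent docstring URLs fingerprint identically:
#   request_fingerprint(Request('http://www.example.com/query?id=111&cat=222')) \
#       == request_fingerprint(Request('http://www.example.com/query?cat=222&id=111'))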
def request_authenticate(request, username, password):
"""Autenticate the given request (in place) using the HTTP basic access
authentication mechanism (RFC 2617) and the given username and password
"""
request.headers['Authorization'] = basic_auth_header(username, password)
def request_httprepr(request):
"""Return the raw HTTP representation (as bytes) of the given request.
This is provided only for reference since it's not the actual stream of
    bytes that will be sent when performing the request (that's controlled
by Twisted).
"""
parsed = urlparse_cached(request)
path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
s = to_bytes(request.method) + b" " + to_bytes(path) + b" HTTP/1.1\r\n"
s += b"Host: " + to_bytes(parsed.hostname) + b"\r\n"
if request.headers:
s += request.headers.to_string() + b"\r\n"
s += b"\r\n"
s += request.body
return s
"""
This module provides some useful functions for working with
scrapy.http.Request objects
"""
from __future__ import print_function
import hashlib
import weakref
from six.moves.urllib.parse import urlunparse
from twisted.internet.defer import Deferred
from w3lib.http import basic_auth_header
from scrapy.utils.python import to_bytes, to_native_str
from scrapy.utils.url import canonicalize_url
from scrapy.utils.httpobj import urlparse_cached
_fingerprint_cache = weakref.WeakKeyDictionary()
def request_fingerprint(request, include_headers=None):
"""
Return the request fingerprint.
The request fingerprint is a hash that uniquely identifies the resource the
request points to. For example, take the following two urls:
http://www.example.com/query?id=111&cat=222
http://www.example.com/query?cat=222&id=111
    Even though those are two different URLs, both point to the same resource
    and are equivalent (i.e. they should return the same response).
    Another example is cookies used to store session ids. Suppose the
    following page is only accessible to authenticated users:
    http://www.example.com/members/offers.html
    Lots of sites use a cookie to store the session id, which adds a random
component to the HTTP Request and thus should be ignored when calculating
the fingerprint.
For this reason, request headers are ignored by default when calculating
    the fingerprint. If you want to include specific headers use the
include_headers argument, which is a list of Request headers to include.
"""
if include_headers:
include_headers = tuple([to_bytes(h.lower())
for h in sorted(include_headers)])
cache = _fingerprint_cache.setdefault(request, {})
if include_headers not in cache:
fp = hashlib.sha1()
fp.update(to_bytes(request.method))
fp.update(to_bytes(canonicalize_url(request.url)))
fp.update(request.body or b'')
if include_headers:
for hdr in include_headers:
if hdr in request.headers:
fp.update(hdr)
for v in request.headers.getlist(hdr):
fp.update(v)
cache[include_headers] = fp.hexdigest()
return cache[include_headers]
def request_authenticate(request, username, password):
"""Autenticate the given request (in place) using the HTTP basic access
authentication mechanism (RFC 2617) and the given username and password
"""
request.headers['Authorization'] = basic_auth_header(username, password)
def request_httprepr(request):
"""Return the raw HTTP representation (as bytes) of the given request.
This is provided only for reference since it's not the actual stream of
    bytes that will be sent when performing the request (that's controlled
by Twisted).
"""
parsed = urlparse_cached(request)
path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
s = to_bytes(request.method) + b" " + to_bytes(path) + b" HTTP/1.1\r\n"
s += b"Host: " + to_bytes(parsed.hostname or b'') + b"\r\n"
if request.headers:
s += request.headers.to_string() + b"\r\n"
s += b"\r\n"
s += request.body
return s
| BugsInPy/BugsInPy/temp/projects/scrapy/bug-29-fixed/scrapy/scrapy/utils/request.py,BugsInPy/BugsInPy/temp/projects/scrapy/bug-29-buggy/scrapy/scrapy/utils/request.py |
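Illustrative sketch (not part of the entries above): the two utils/request.py copies differ only in request_httprepr's Host line (`to_bytes(parsed.hostname)` versus `to_bytes(parsed.hostname or b'')`). Non-HTTP URLs are what separates them:
from scrapy import Request
from scrapy.utils.request import request_httprepr
request_httprepr(Request('http://example.com/page'))  # fine with either copy
request_httprepr(Request('file:///tmp/test.txt'))
# file:// URLs parse with hostname=None, so the bare to_bytes(parsed.hostname)
# raises TypeError, while the `or b''` fallback emits an empty Host header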
tqdm-bug-5 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
# Create global parallelism locks to avoid racing issues with parallel bars
# works only if fork available (Linux, MacOSX, but not on Windows)
try:
mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
mp_lock = None
except OSError: # pragma: no cover
mp_lock = None
try:
th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
th_lock = None
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
global mp_lock, th_lock
self.locks = [lk for lk in [mp_lock, th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_lock = TqdmDefaultWriteLock()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
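    # Illustration (not part of the original file):
    #   tqdm.format_sizeof(12345) -> '12.3k'
    #   tqdm.format_sizeof(1.5)   -> '1.50'
    #   tqdm.format_sizeof(999.9) -> '1000' (just under the 999.95 carry cutoff)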
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
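    # Illustration (not part of the original file):
    #   tqdm.format_interval(75)   -> '01:15'
    #   tqdm.format_interval(3661) -> '1:01:01'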
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
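    # NOTE (illustration): print_status redraws in place by emitting '\r' plus
    # the new status, padded with spaces so a shorter line fully erases the
    # longer one printed before it.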
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
        The expected total number of iterations. If meaningless (e.g. None), only
basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
total *= unit_scale
n *= unit_scale
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': inv_rate if inv_rate and inv_rate > 1
else rate,
'rate_fmt': rate_fmt,
'rate_noinv': rate,
'rate_noinv_fmt': rate_noinv_fmt,
'rate_inv': inv_rate,
'rate_inv_fmt': rate_inv_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix or '',
'postfix': postfix,
# 'bar': full_bar # replaced by procedure below
}
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**bar_args)
r_bar = r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
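    # Illustration (not part of the original file): with defaults,
    # tqdm.format_meter(5, 10, 2) renders roughly
    #   ' 50%|█████     | 5/10 [00:02<00:02,  2.50it/s]'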
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
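    # NOTE (illustration): yields the smallest free slot, e.g. if live bars
    # occupy positions {0, 2} this returns 1.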
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
if not instance.gui: # pragma: no cover
raise
else:
for inst in cls._instances:
# negative `pos` means fixed
if inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls._lock.acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
cls._lock = lock
@classmethod
def get_lock(cls):
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress display update interval, in seconds [default: 10].
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write)) if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
self.sp(self.__repr__(elapsed=0))
if self.pos:
self.moveto(-abs(self.pos))
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self, elapsed=None):
return self.format_meter(
self.n, self.total,
elapsed if elapsed is not None else self._time() - self.start_t,
self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols,
self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time if self.avg_time else None,
self.bar_format, self.postfix, self.unit_divisor)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
self.n = n
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * \
(mininterval / delta_t
if mininterval and delta_t else 1) + \
(1 - smoothing) * miniters
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
self.sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.sp(self.__repr__())
if pos:
self.moveto(-pos)
elif not max([abs(getattr(i, "pos", 0))
for i in self._instances] + [0]):
# only if not nested (#477)
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = '{0:2.3g}'.format(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
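    # NOTE (illustration): for positive n this emits n newlines (cursor moves
    # down); for negative n the '\n' * n term is empty and _term_move_up() * -n
    # moves the cursor up |n| lines instead.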
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp(self.__repr__())
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
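Illustrative sketch (standalone, not part of tqdm): the EMA speed estimate used in __iter__ and update above mixes the latest seconds-per-iteration sample into the running average.
def ema_avg_time(avg_time, delta_t, delta_it, smoothing=0.3):
    sample = delta_t / delta_it  # seconds per iteration over the last interval
    if avg_time is None:
        return sample  # first sample seeds the estimate
    return smoothing * sample + (1 - smoothing) * avg_time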
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
# Create global parallelism locks to avoid racing issues with parallel bars
# works only if fork available (Linux, MacOSX, but not on Windows)
try:
mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
mp_lock = None
except OSError: # pragma: no cover
mp_lock = None
try:
th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
th_lock = None
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
global mp_lock, th_lock
self.locks = [lk for lk in [mp_lock, th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_lock = TqdmDefaultWriteLock()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
        The expected total number of iterations. If meaningless (e.g. None), only
basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
total *= unit_scale
n *= unit_scale
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': inv_rate if inv_rate and inv_rate > 1
else rate,
'rate_fmt': rate_fmt,
'rate_noinv': rate,
'rate_noinv_fmt': rate_noinv_fmt,
'rate_inv': inv_rate,
'rate_inv_fmt': rate_inv_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix or '',
'postfix': postfix,
# 'bar': full_bar # replaced by procedure below
}
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**bar_args)
r_bar = r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
if not instance.gui: # pragma: no cover
raise
else:
for inst in cls._instances:
# negative `pos` means fixed
if inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls._lock.acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
cls._lock = lock
@classmethod
def get_lock(cls):
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update the bar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrarily large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress display update interval, in seconds [default: 10].
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write)) if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
self.sp(self.__repr__(elapsed=0))
if self.pos:
self.moveto(-abs(self.pos))
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self, elapsed=None):
return self.format_meter(
self.n, self.total,
elapsed if elapsed is not None else self._time() - self.start_t,
self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols,
self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time if self.avg_time else None,
self.bar_format, self.postfix, self.unit_divisor)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
self.n = n
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * \
(mininterval / delta_t
if mininterval and delta_t else 1) + \
(1 - smoothing) * miniters
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
self.sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.sp(self.__repr__())
if pos:
self.moveto(-pos)
elif not max([abs(getattr(i, "pos", 0))
for i in self._instances] + [0]):
# only if not nested (#477)
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = '{0:2.3g}'.format(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp(self.__repr__())
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-5-fixed/tqdm/tqdm/_tqdm.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-5-buggy/tqdm/tqdm/_tqdm.py |
tqdm-bug-3 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, RE_ANSI, _is_ascii, SimpleTextIOWrapper
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars (works only if fork is available: Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
cls.mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
ASCII_FMT = " 123456789#"
UTF_FMT = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
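# Illustrative doctest-style examples for `format_sizeof` (values follow
# from the thresholds above; added for clarity, not in the original source):
# >>> tqdm.format_sizeof(1234)
# '1.23k'
# >>> tqdm.format_sizeof(123456, suffix='B')
# '123kB'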
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
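# Worked examples (illustrative; derived from the divmod logic above):
# >>> tqdm.format_interval(75)
# '01:15'
# >>> tqdm.format_interval(3661)
# '1:01:01'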
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
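# Illustrative example: the shorter of the '.3g' and plain representations
# wins (added for clarity, not in the original source):
# >>> tqdm.format_num(0.00001)
# '1e-5'
# >>> tqdm.format_num(42)
# '42'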
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
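# Worked arithmetic (illustrative): with alpha=0.3, a previous EMA of 10
# and a new value of 20 give 0.3*20 + 0.7*10 = 13.0; a None `mu` simply
# seeds the average with `x`:
# >>> tqdm.ema(10)        # mu is None -> returns x
# 10
# >>> tqdm.ema(20, 10, 0.3)
# 13.0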
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
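# Minimal usage sketch (illustrative only): the returned closure rewrites
# the same line in place, padding with spaces so a shorter message fully
# overwrites a longer one:
# >>> sp = tqdm.status_printer(sys.stderr)
# >>> sp('50%|#####     |')  # writes '\r50%|#####     |'
# >>> sp('done')             # writes '\rdone' plus padding spaces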
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If meaningless (e.g. None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s,
remaining, remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will, if possible, be set to postfix = ', ' + postfix.
However, other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup workaround
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
desc=prefix or '', postfix=postfix, unit=unit,
# bar=full_bar, # replaced by procedure below
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
format_dict.update(l_bar=l_bar, percentage=percentage)
# , bar=full_bar # replaced by procedure below
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**format_dict)
r_bar = r_bar_user.format(**format_dict)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**format_dict)
# Formatting progress bar space available for bar's display
if ncols:
N_BARS = max(1, ncols - len(RE_ANSI.sub('', l_bar + r_bar)))
else:
N_BARS = 10
# format bar depending on availability of unicode/ascii chars
if ascii is True:
ascii = ASCII_FMT
elif ascii is False:
ascii = UTF_FMT
nsyms = len(ascii) - 1
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * nsyms), nsyms)
bar = ascii[-1] * bar_length
frac_bar = ascii[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
ascii[0] * (N_BARS - bar_length - 1)
else:
full_bar = bar + \
ascii[0] * (N_BARS - bar_length)
# Piece together the bar parts
return l_bar + full_bar + r_bar
elif bar_format:
# user-specified bar_format but no total
return bar_format.format(bar='?', **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
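# Illustrative call (values computed from the logic above; ascii=True is
# used so the output is reproducible across terminals):
# >>> tqdm.format_meter(30, 100, 3, ascii=True)
# ' 30%|###       | 30/100 [00:03<00:07, 10.00it/s]'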
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
# Construct the lock if it does not exist
with cls.get_lock():
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
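# Canonical usage sketch (illustrative): emit log messages from inside a
# loop without breaking the bar redraw:
# >>> for i in tqdm(range(3)):
# ...     tqdm.write("done with %i" % i)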
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within the context and refresh tqdm when it exits.
Useful when writing to a standard output stream.
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
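# Usage sketch (illustrative): temporarily clear all bars so that direct
# writes to stdout/stderr do not interleave with bar redraws:
# >>> with tqdm.external_write_mode():
# ...     print("messages here will not garble active bars")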
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
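# Sketch for parallel use (illustrative; the `Pool` initializer pattern is
# an assumption based on the lock semantics above, not part of this
# module): share the write lock with workers so their bars do not collide:
# >>> from multiprocessing import Pool
# >>> p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))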
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update the bar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrarily large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s, remaining,
remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True`, bytes will also be
written. In all other cases, unicode is used.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
from textwrap import dedent
raise (TqdmDeprecationWarning(dedent("""\
`nested` is deprecated and automated.
Use `position` instead for manual control.
"""), fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
self.display()
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
if not hasattr(self, 'sp'):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=pos)
if not max([abs(getattr(i, "pos", 0))
for i in self._instances] + [pos]):
# only if not nested (#477)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""Force refresh the display of this bar."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
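# Usage sketch (illustrative): reuse one bar across repeated passes
# instead of constructing a new instance each time:
# >>> t = tqdm(total=10)
# >>> # ... t.update() through the first pass ...
# >>> t.reset(total=20)  # zero the counters and start a longer pass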
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
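# Illustrative read-only access (not in the original source): the dict
# mirrors the keyword arguments accepted by `format_meter` above:
# >>> t = tqdm(total=100)
# >>> d = t.format_dict
# >>> d['n'], d['total']
# (0, 100)
# >>> t.close()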
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
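# Overloading sketch (illustrative; `my_frontend` is a hypothetical
# renderer, not part of this module): route the formatted state to a
# custom frontend instead of `self.sp`, as the docstring above suggests:
# >>> class tqdm_custom(tqdm):
# ...     def display(self, msg=None, pos=None):
# ...         my_frontend(**self.format_dict)  # hypothetical renderer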
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
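# Usage sketch (illustrative):
# >>> from tqdm import trange
# >>> for i in trange(5, desc='batch'):
# ...     pass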
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, RE_ANSI, _is_ascii, SimpleTextIOWrapper
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
        # Create global parallelism locks to avoid racing issues with
        # parallel bars; works only if fork is available
        # (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
cls.mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
ASCII_FMT = " 123456789#"
UTF_FMT = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
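        Examples
        --------
        Illustrative sketch (not in the original source):
        >>> tqdm.format_sizeof(1337)
        '1.34k'
        >>> tqdm.format_sizeof(1337, suffix='B', divisor=1024)
        '1.31kB'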
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
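        Examples
        --------
        Illustrative sketch (not in the original source):
        >>> tqdm.format_interval(95)
        '01:35'
        >>> tqdm.format_interval(5025)
        '1:23:45'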
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
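        Examples
        --------
        Illustrative sketch (not in the original source): the shorter of
        the '.3g' form and the plain str() form is returned:
        >>> tqdm.format_num(123456789)
        '1.23e+8'
        >>> tqdm.format_num(42)
        '42'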
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
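        Examples
        --------
        Illustrative sketch (not in the original source):
        >>> tqdm.ema(10)        # no previous value: seeds with x
        10
        >>> tqdm.ema(10, mu=0)  # 0.3 * 10 + 0.7 * 0
        3.0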
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
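    # Illustrative sketch (not in the original source): the returned printer
    # redraws in place by prepending '\r' and right-padding with spaces to
    # erase leftovers from a longer previous status, e.g.
    #   sp = tqdm.status_printer(sys.stderr)
    #   sp('50%|#####     | 5/10')
    #   sp('done')  # shorter message: padded to the previous length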
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (None),
            only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
        ascii : bool or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s,
remaining, remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
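        Examples
        --------
        Illustrative sketch (not in the original source); the output assumes
        this implementation's defaults:
        >>> tqdm.format_meter(n=5, total=10, elapsed=2.0, ncols=40,
        ...                   ascii=True)
        ' 50%|##  | 5/10 [00:02<00:02,  2.50it/s]'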
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
desc=prefix or '', postfix=postfix, unit=unit,
# bar=full_bar, # replaced by procedure below
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
format_dict.update(l_bar=l_bar, percentage=percentage)
# , bar=full_bar # replaced by procedure below
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**format_dict)
r_bar = r_bar_user.format(**format_dict)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**format_dict)
# Formatting progress bar space available for bar's display
if ncols:
N_BARS = max(1, ncols - len(RE_ANSI.sub('', l_bar + r_bar)))
else:
N_BARS = 10
# format bar depending on availability of unicode/ascii chars
if ascii is True:
ascii = ASCII_FMT
elif ascii is False:
ascii = UTF_FMT
nsyms = len(ascii) - 1
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * nsyms), nsyms)
bar = ascii[-1] * bar_length
frac_bar = ascii[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
ascii[0] * (N_BARS - bar_length - 1)
else:
full_bar = bar + \
ascii[0] * (N_BARS - bar_length)
# Piece together the bar parts
return l_bar + full_bar + r_bar
elif bar_format:
# user-specified bar_format but no total
return bar_format.format(bar='?', **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
# Construct the lock if it does not exist
with cls.get_lock():
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
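    # Illustrative note (not in the original source): e.g. with bars at
    # positions {0, 2} the smallest free slot returned is 1; with no bars
    # it is 0.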
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s, remaining,
remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
            Specify the line offset to print this bar (starting from 0).
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
from textwrap import dedent
raise (TqdmDeprecationWarning(dedent("""\
`nested` is deprecated and automated.
Use `position` instead for manual control.
"""), fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
self.display()
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('Boolean cast is undefined'
' for tqdm objects that have no iterable or total')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
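    # Illustrative note (not in the original source): with no iterable this
    # returns `total`; otherwise it tries `iterable.shape[0]`, then
    # `len(iterable)`, and finally falls back to `total`.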
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
if not hasattr(self, 'sp'):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=pos)
if not max([abs(getattr(i, "pos", 0))
for i in self._instances] + [pos]):
# only if not nested (#477)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""Force refresh the display of this bar."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
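    # Illustrative note (not in the original source): shifting `start_t`
    # forward by the paused duration keeps elapsed time (and hence
    # rate/ETA) from counting the pause.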
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-3-fixed/tqdm/tqdm/_tqdm.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-3-buggy/tqdm/tqdm/_tqdm.py |
tqdm-bug-6 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
# Create global parallelism locks to avoid racing issues with parallel bars;
# works only if fork is available (Linux, MacOSX, but not on Windows)
try:
mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
mp_lock = None
except OSError: # pragma: no cover
mp_lock = None
try:
th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
th_lock = None
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
global mp_lock, th_lock
self.locks = [lk for lk in [mp_lock, th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
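# Illustrative sketch (not in the original source): the shared lock is used
# as a context manager to serialise writes from concurrent bars, e.g.
#   with tqdm.get_lock():
#       sys.stderr.write('safe to write here\n')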
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_lock = TqdmDefaultWriteLock()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (None),
            only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
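        Examples
        --------
        Illustrative sketch (not in the original source); with `ncols=0`
        only the stats are shown (no meter):
        >>> tqdm.format_meter(n=1024, total=4096, elapsed=1.0, ncols=0,
        ...                   unit='B', unit_scale=True, unit_divisor=1024)
        ' 25% 1.00k/4.00k [00:01<00:03, 1.02kB/s]'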
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
total *= unit_scale
n *= unit_scale
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': inv_rate if inv_rate and inv_rate > 1
else rate,
'rate_fmt': rate_fmt,
'rate_noinv': rate,
'rate_noinv_fmt': rate_noinv_fmt,
'rate_inv': inv_rate,
'rate_inv_fmt': rate_inv_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix or '',
'postfix': postfix,
# 'bar': full_bar # replaced by procedure below
}
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**bar_args)
r_bar = r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance)
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
if not instance.gui: # pragma: no cover
raise
else:
for inst in cls._instances:
# negative `pos` means fixed
if inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
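    # Illustrative usage (sketch, not in the original source):
    #   for i in trange(10):
    #       if i == 5:
    #           tqdm.write('halfway')  # printed above any active bars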
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls._lock.acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp)):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
# Avoid race conditions by checking that the instance started
if hasattr(inst, 'start_t'): # pragma: nocover
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
cls._lock = lock
@classmethod
def get_lock(cls):
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
from pandas import Panel
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = getattr(df, 'ngroups', None)
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
else: # DataFrame or Panel
axis = kwargs.get('axis', 0)
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if t.total and t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress display update interval, in seconds [default: 10].
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0).
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write)) if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
self.sp(self.__repr__(elapsed=0))
if self.pos:
self.moveto(-abs(self.pos))
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
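# A minimal sketch of manual (non-iterable) use of this constructor,
# mirroring the update() docstring further below; sizes are illustrative:
# >>> with tqdm(total=100, unit='B', unit_scale=True) as t:
# ...     for _ in range(10):
# ...         t.update(10)  # reaches total exactly, then auto-closes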
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else self.total)
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self, elapsed=None):
return self.format_meter(
self.n, self.total,
elapsed if elapsed is not None else self._time() - self.start_t,
self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols,
self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time if self.avg_time else None,
self.bar_format, self.postfix, self.unit_divisor)
def __lt__(self, other):
return abs(self.pos) < abs(other.pos)
def __le__(self, other):
return (self < other) or (self == other)
def __eq__(self, other):
return abs(self.pos) == abs(other.pos)
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
self.n = n
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * \
(mininterval / delta_t
if mininterval and delta_t else 1) + \
(1 - smoothing) * miniters
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
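# Note on the smoothing above: avg_time is an exponential moving average,
# avg = s * (delta_t / delta_it) + (1 - s) * avg. Worked example (assumed
# numbers): with s=0.3, a previous avg of 1.0 s/it and a new sample of
# 0.5 s/it, the new avg is 0.3*0.5 + 0.7*1.0 = 0.85 s/it (~1.18 it/s).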
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
self.sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.sp(self.__repr__())
if pos:
self.moveto(-pos)
else:
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = '{0:2.3g}'.format(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
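# A small sketch of set_postfix formatting (assumes a live bar `t`);
# numbers go through '{0:2.3g}', anything else through str():
# >>> t = tqdm(total=10)
# >>> t.set_postfix(loss=0.123456, stage="train")
# displays: ... loss=0.123, stage=train]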
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
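# NB: moveto(n) emits n newlines for positive n, or n cursor-up escape
# sequences (_term_move_up) for negative n; this is how nested bars jump
# to their own screen line and back.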
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp(self.__repr__())
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# with future division, `/` yields a float and `//` an int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
# Create global parallelism locks to avoid racing issues with parallel bars
# works only if fork available (Linux, MacOSX, but not on Windows)
try:
mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
mp_lock = None
except OSError: # pragma: no cover
mp_lock = None
try:
th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
th_lock = None
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
global mp_lock, th_lock
self.locks = [lk for lk in [mp_lock, th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
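# NB: acquire() takes the multiprocessing lock before the thread lock and
# release() unwinds in the reverse order, the usual discipline to avoid
# lock-order inversions; any lock that could not be created is simply
# absent from self.locks.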
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_lock = TqdmDefaultWriteLock()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
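# Hand-computed examples for the thresholds above (default divisor 1000):
# >>> tqdm.format_sizeof(999)     # '999'
# >>> tqdm.format_sizeof(1234)    # '1.23k'
# >>> tqdm.format_sizeof(999999)  # '1.00M'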
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
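# Hand-computed examples:
# >>> tqdm.format_interval(59)    # '00:59'
# >>> tqdm.format_interval(3661)  # '1:01:01'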
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
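# NB: print_status overwrites the current line by emitting '\r' followed
# by the new text, padded with spaces so that a shorter message fully
# erases a longer previous one (last_len is a closed-over mutable cell).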
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If meaningless (e.g. None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will, if possible, be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
total *= unit_scale
n *= unit_scale
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': inv_rate if inv_rate and inv_rate > 1
else rate,
'rate_fmt': rate_fmt,
'rate_noinv': rate,
'rate_noinv_fmt': rate_noinv_fmt,
'rate_inv': inv_rate,
'rate_inv_fmt': rate_inv_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix or '',
'postfix': postfix,
# 'bar': full_bar # replaced by procedure below
}
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**bar_args)
r_bar = r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
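# A hand-computed example of the default output above, using ncols=0 so
# only the stats (no meter) are rendered:
# >>> tqdm.format_meter(n=20, total=100, elapsed=10, ncols=0)
# ' 20% 20/100 [00:10<00:40,  2.00it/s]'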
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance)
return min(set(range(len(positions) + 1)).difference(positions))
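# NB: returns the smallest non-negative slot not taken by another live
# bar; e.g. with positions {0, 1, 3} in use this yields 2, and with no
# other instances it yields 0.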
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
if not instance.gui: # pragma: no cover
raise
else:
for inst in cls._instances:
# negative `pos` means fixed
if inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when the context exits.
Useful when writing to a standard output stream.
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls._lock.acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp)):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
# Avoid race conditions by checking that the instance started
if hasattr(inst, 'start_t'): # pragma: nocover
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
cls._lock = lock
@classmethod
def get_lock(cls):
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
from pandas import Panel
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = getattr(df, 'ngroups', None)
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
else: # DataFrame or Panel
axis = kwargs.get('axis', 0)
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if t.total and t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function `func` to the df
# using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress display update interval, in seconds [default: 10].
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0).
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write)) if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
self.sp(self.__repr__(elapsed=0))
if self.pos:
self.moveto(-abs(self.pos))
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
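# NB: unlike the other copy of __len__ in this file, this variant falls
# back to getattr(self, "total", None), which avoids an AttributeError
# when __init__ returned early (disable=True) without setting self.total.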
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self, elapsed=None):
return self.format_meter(
self.n, self.total,
elapsed if elapsed is not None else self._time() - self.start_t,
self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols,
self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time if self.avg_time else None,
self.bar_format, self.postfix, self.unit_divisor)
def __lt__(self, other):
return abs(self.pos) < abs(other.pos)
def __le__(self, other):
return (self < other) or (self == other)
def __eq__(self, other):
return abs(self.pos) == abs(other.pos)
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
self.n = n
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * \
(mininterval / delta_t
if mininterval and delta_t else 1) + \
(1 - smoothing) * miniters
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
self.sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.sp(self.__repr__())
if pos:
self.moveto(-pos)
else:
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = '{0:2.3g}'.format(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp(self.__repr__())
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-6-fixed/tqdm/tqdm/_tqdm.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-6-buggy/tqdm/tqdm/_tqdm.py |
tqdm-bug-9 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important so that dividing integers yields
# precise floating-point results (instead of a truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols, _range, _unich
import sys
from time import time
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange', 'format_interval', 'format_meter']
def format_sizeof(num, suffix=''):
"""
Formats a number (greater than unity) with SI Order of Magnitude prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
if abs(num) < 100.0:
if abs(num) < 10.0:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= 1000.0
return '{0:3.1f}Y'.format(num) + suffix
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If meaningless (e.g. None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress bar
+ no limit for the iterations counter and statistics. If 0, will not
print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool, optional
If set, the number of iterations will be printed with an appropriate
SI metric prefix (K = 10^3, M = 10^6, etc.) [default: False].
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# if the given total is wrong (n has exceeded it), switch to the mode
# without a total prediction (since the ETA would be wrong anyway)
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
rate_fmt = ((format_sizeof(n / elapsed) if unit_scale else
'{0:5.2f}'.format(n / elapsed)) if elapsed else
'?') \
+ unit + '/s'
if unit_scale:
n_fmt = format_sizeof(n)
total_fmt = format_sizeof(total) if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
if total:
frac = n / total
percentage = frac * 100
remaining_str = format_interval(elapsed * (total-n) / n) if n else '?'
l_bar = (prefix if prefix else '') + '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#'*bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588)*bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0) # bar end padding
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0) # bar end padding
return l_bar + full_bar + r_bar
else: # no progressbar nor ETA, just progress statistics
return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
n_fmt, unit, elapsed_str, rate_fmt)
def StatusPrinter(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place updating
may not work (it will print a new line at each refresh).
"""
fp = file
last_printed_len = [0] # closure over mutable variable (fast)
def print_status(s):
len_s = len(s)
fp.write('\r' + s + ' '*max(last_printed_len[0] - len_s, 0))
fp.flush()
last_printed_len[0] = len_s
return print_status
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
def __init__(self, iterable=None, desc=None, total=None, leave=False,
file=sys.stderr, ncols=None, mininterval=0.1,
miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, gui=False):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank [default: None] to manually manage the updates.
desc : str, optional
Prefix for the progressbar [default: None].
total : int, optional
The number of expected iterations. If not given, len(iterable) is
used if possible. As a last resort, only basic progress
statistics are displayed (no ETA, no progressbar). If `gui` is
True and this parameter needs subsequent updating, specify an
initial arbitrary large positive integer, e.g. int(9e9).
leave : bool, optional
If [default: False], removes all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progressbar to stay within this bound
[default: None]. The fallback is a meter width of 10 and no
limit for the counter and statistics. If 0, will not print any
meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
miniters : int, optional
Minimum progress update interval, in iterations [default: None].
If specified, will set `mininterval` to 0.
ascii : bool, optional
If [default: None] or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool
Whether to disable the entire progressbar wrapper [default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: 'it'].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
gui : bool, optional
If set, will attempt to use matplotlib animations for a
graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if (ncols is None) and (file in (sys.stderr, sys.stdout)):
ncols = _environ_cols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
mininterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if gui: # pragma: no cover
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
except ImportError:
gui = False
else:
self.mpl = mpl
self.plt = plt
# Store the arguments
self.iterable = iterable
self.desc = desc+': ' if desc else ''
self.total = total
self.leave = leave
self.file = file
self.ncols = ncols
self.mininterval = mininterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
if gui: # pragma: no cover
# Initialize the GUI display
if not disable:
file.write('Warning: GUI is experimental/alpha\n')
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
if total:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if total:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((unit if unit else 'it') + '/s')
if unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
else:
# Initialize the screen printer
self.sp = StatusPrinter(self.file)
if not disable:
self.sp(format_meter(
0, total, 0, ncols, self.desc, ascii, unit, unit_scale))
# Init the time/iterations counters
self.start_t = self.last_print_t = time()
self.last_print_n = 0
self.n = 0
def __len__(self):
return len(self.iterable)
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
gui = self.gui
if gui: # pragma: no cover
plt = self.plt
ax = self.ax
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
line1 = self.line1
line2 = self.line2
else:
sp = self.sp
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
if gui: # pragma: no cover
# Inline due to multiple calls
total = self.total
# instantaneous rate
y = delta_it / delta_t
# smoothed rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(format_meter(
n, total, elapsed, 0,
self.desc, ascii, unit, unit_scale),
fontname="DejaVu Sans Mono",
fontsize=11)
plt.pause(1e-9)
else:
sp(format_meter(
n, self.total, elapsed, ncols,
self.desc, ascii, unit, unit_scale))
if dynamic_miniters:
miniters = max(miniters, delta_it)
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 1:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
if self.gui: # pragma: no cover
# Inline due to multiple calls
total = self.total
ax = self.ax
# instantaneous rate
y = delta_it / delta_t
# smoothed rate
z = self.n / elapsed
# update line data
self.xdata.append(self.n * 100.0 / total
if total else cur_t)
self.ydata.append(y)
self.zdata.append(z)
# Discard old values
if (not total) and elapsed > 66:
self.xdata.popleft()
self.ydata.popleft()
self.zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
self.line1.set_data(self.xdata, self.ydata)
self.line2.set_data(self.xdata, self.zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [self.n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in self.xdata]
self.line1.set_data(t_ago, self.ydata)
self.line2.set_data(t_ago, self.zdata)
ax.set_title(format_meter(
self.n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale),
fontname="DejaVu Sans Mono",
fontsize=11)
self.plt.pause(1e-9)
else:
self.sp(format_meter(
self.n, self.total, elapsed, self.ncols,
self.desc, self.ascii, self.unit, self.unit_scale))
if self.dynamic_miniters:
self.miniters = max(self.miniters, delta_it)
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
if self.gui: # pragma: no cover
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
else:
if self.leave:
if self.last_print_n < self.n:
cur_t = time()
self.sp(format_meter(
self.n, self.total, cur_t-self.start_t, self.ncols,
self.desc, self.ascii, self.unit, self.unit_scale))
self.file.write('\n')
else:
self.sp('')
self.file.write('\r')
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python 3+, range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important so that dividing integers yields
# precise floating-point numbers (instead of truncated ints)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols, _range, _unich
import sys
from time import time
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange', 'format_interval', 'format_meter']
def format_sizeof(num, suffix=''):
"""
Formats a number (greater than unity) with SI Order of Magnitude prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= 1000.0
return '{0:3.1f}Y'.format(num) + suffix
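# A few worked examples of the scaling above (format_sizeof is pure, so
# these are easy to check by hand):
#   format_sizeof(1500)      -> '1.50K'
#   format_sizeof(123456789) -> '123M'
#   format_sizeof(0.5)       -> '0.50'  (values below unity are not scaled)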
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
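# Worked examples of the clock formatting:
#   format_interval(75)   -> '01:15'
#   format_interval(3661) -> '1:01:01'  (hours are shown only when non-zero)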
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If meaningless (None), only basic
progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress bar
+ no limit for the iterations counter and statistics. If 0, will not
print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool, optional
If set, the number of iterations will be printed with an appropriate
SI metric prefix (K = 10^3, M = 10^6, etc.) [default: False].
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# if the given total is wrong (n is above it), switch to the mode
# without the total prediction (since the ETA would be wrong anyway)
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
rate_fmt = ((format_sizeof(n / elapsed) if unit_scale else
'{0:5.2f}'.format(n / elapsed)) if elapsed else
'?') \
+ unit + '/s'
if unit_scale:
n_fmt = format_sizeof(n)
total_fmt = format_sizeof(total) if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
if total:
frac = n / total
percentage = frac * 100
remaining_str = format_interval(elapsed * (total-n) / n) if n else '?'
l_bar = (prefix if prefix else '') + '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#'*bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588)*bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0) # bar end padding
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0) # bar end padding
return l_bar + full_bar + r_bar
else: # no progressbar nor ETA, just progress statistics
return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
n_fmt, unit, elapsed_str, rate_fmt)
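# A worked example; ascii=True is used so the bar is reproducible in plain
# text (the unicode path fills with smooth block characters instead):
#   format_meter(20, 100, 12, ascii=True)
#   -> ' 20%|##        | 20/100 [00:12<00:48,  1.67it/s]'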
def StatusPrinter(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place updating
may not work (it will print a new line at each refresh).
"""
fp = file
last_printed_len = [0] # closure over mutable variable (fast)
def print_status(s):
len_s = len(s)
fp.write('\r' + s + ' '*max(last_printed_len[0] - len_s, 0))
fp.flush()
last_printed_len[0] = len_s
return print_status
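# Minimal usage sketch (any object with write/flush works; sys.stderr is
# just an example):
#   sp = StatusPrinter(sys.stderr)
#   sp('processing: 40%')  # '\r' rewinds the line in place
#   sp('done')             # trailing spaces blank out leftover characters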
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
def __init__(self, iterable=None, desc=None, total=None, leave=False,
file=sys.stderr, ncols=None, mininterval=0.1,
miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, gui=False):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank [default: None] to manually manage the updates.
desc : str, optional
Prefix for the progressbar [default: None].
total : int, optional
The number of expected iterations. If not given, len(iterable) is
used if possible. As a last resort, only basic progress
statistics are displayed (no ETA, no progressbar). If `gui` is
True and this parameter needs subsequent updating, specify an
initial arbitrarily large positive integer, e.g. int(9e9).
leave : bool, optional
If False (the default), removes all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progressbar to stay within this bound
[default: None]. The fallback is a meter width of 10 and no
limit for the counter and statistics. If 0, will not print any
meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
miniters : int, optional
Minimum progress update interval, in iterations [default: None].
If specified, will set `mininterval` to 0.
ascii : bool, optional
If unset (the default) or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool
Whether to disable the entire progressbar wrapper [default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: 'it'].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
gui : bool, optional
If set, will attempt to use matplotlib animations for a
graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if (ncols is None) and (file in (sys.stderr, sys.stdout)):
ncols = _environ_cols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
mininterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if gui: # pragma: no cover
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
except ImportError:
gui = False
else:
self.mpl = mpl
self.plt = plt
# Store the arguments
self.iterable = iterable
self.desc = desc+': ' if desc else ''
self.total = total
self.leave = leave
self.file = file
self.ncols = ncols
self.mininterval = mininterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
if gui: # pragma: no cover
# Initialize the GUI display
if not disable:
file.write('Warning: GUI is experimental/alpha\n')
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
if total:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if total:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((unit if unit else 'it') + '/s')
if unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
else:
# Initialize the screen printer
self.sp = StatusPrinter(self.file)
if not disable:
self.sp(format_meter(
0, total, 0, ncols, self.desc, ascii, unit, unit_scale))
# Init the time/iterations counters
self.start_t = self.last_print_t = time()
self.last_print_n = 0
self.n = 0
def __len__(self):
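# Unlike the earlier copy of this file, fall back to self.total when no
# iterable was supplied (the tqdm(total=...) manual-update mode), instead
# of letting len(None) raise TypeError.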
return len(self.iterable) if self.iterable else self.total
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
gui = self.gui
if gui: # pragma: no cover
plt = self.plt
ax = self.ax
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
line1 = self.line1
line2 = self.line2
else:
sp = self.sp
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
if gui: # pragma: no cover
# Inline due to multiple calls
total = self.total
# instantaneous rate
y = delta_it / delta_t
# smoothed rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(format_meter(
n, total, elapsed, 0,
self.desc, ascii, unit, unit_scale),
fontname="DejaVu Sans Mono",
fontsize=11)
plt.pause(1e-9)
else:
sp(format_meter(
n, self.total, elapsed, ncols,
self.desc, ascii, unit, unit_scale))
if dynamic_miniters:
miniters = max(miniters, delta_it)
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 1:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
if self.gui: # pragma: no cover
# Inline due to multiple calls
total = self.total
ax = self.ax
# instantaneous rate
y = delta_it / delta_t
# smoothed rate
z = self.n / elapsed
# update line data
self.xdata.append(self.n * 100.0 / total
if total else cur_t)
self.ydata.append(y)
self.zdata.append(z)
# Discard old values
if (not total) and elapsed > 66:
self.xdata.popleft()
self.ydata.popleft()
self.zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
self.line1.set_data(self.xdata, self.ydata)
self.line2.set_data(self.xdata, self.zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [self.n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in self.xdata]
self.line1.set_data(t_ago, self.ydata)
self.line2.set_data(t_ago, self.zdata)
ax.set_title(format_meter(
self.n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale),
fontname="DejaVu Sans Mono",
fontsize=11)
self.plt.pause(1e-9)
else:
self.sp(format_meter(
self.n, self.total, elapsed, self.ncols,
self.desc, self.ascii, self.unit, self.unit_scale))
if self.dynamic_miniters:
self.miniters = max(self.miniters, delta_it)
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
if self.gui: # pragma: no cover
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
else:
if self.leave:
if self.last_print_n < self.n:
cur_t = time()
self.sp(format_meter(
self.n, self.total, cur_t-self.start_t, self.ncols,
self.desc, self.ascii, self.unit, self.unit_scale))
self.file.write('\n')
else:
self.sp('')
self.file.write('\r')
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python 3+, range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-9-fixed/tqdm/tqdm/_tqdm.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-9-buggy/tqdm/tqdm/_tqdm.py |
tqdm-bug-1 | """
Thin wrappers around common functions.
Subpackages contain potentially unstable extensions.
"""
from tqdm import tqdm
from tqdm.auto import tqdm as tqdm_auto
from tqdm.utils import ObjectWrapper
from copy import deepcopy
import functools
import sys
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['tenumerate', 'tzip', 'tmap']
class DummyTqdmFile(ObjectWrapper):
"""Dummy file-like that will write to tqdm"""
def write(self, x, nolock=False):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
tqdm.write(x, file=self._wrapped, nolock=nolock)
def tenumerate(iterable, start=0, total=None, tqdm_class=tqdm_auto,
**tqdm_kwargs):
"""
Equivalent of `numpy.ndenumerate` or builtin `enumerate`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
try:
import numpy as np
except ImportError:
pass
else:
if isinstance(iterable, np.ndarray):
return tqdm_class(np.ndenumerate(iterable),
total=total or len(iterable), **tqdm_kwargs)
return enumerate(tqdm_class(iterable, start, **tqdm_kwargs))
def _tzip(iter1, *iter2plus, **tqdm_kwargs):
"""
Equivalent of builtin `zip`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
kwargs = deepcopy(tqdm_kwargs)
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
for i in zip(tqdm_class(iter1, **tqdm_kwargs), *iter2plus):
yield i
def _tmap(function, *sequences, **tqdm_kwargs):
"""
Equivalent of builtin `map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
for i in _tzip(*sequences, **tqdm_kwargs):
yield function(*i)
if sys.version_info[:1] < (3,):
@functools.wraps(_tzip)
def tzip(*args, **kwargs):
return list(_tzip(*args, **kwargs))
@functools.wraps(_tmap)
def tmap(*args, **kwargs):
return list(_tmap(*args, **kwargs))
else:
tzip = _tzip
tmap = _tmap
"""
Thin wrappers around common functions.
Subpackages contain potentially unstable extensions.
"""
from tqdm import tqdm
from tqdm.auto import tqdm as tqdm_auto
from tqdm.utils import ObjectWrapper
from copy import deepcopy
import functools
import sys
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['tenumerate', 'tzip', 'tmap']
class DummyTqdmFile(ObjectWrapper):
"""Dummy file-like that will write to tqdm"""
def write(self, x, nolock=False):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
tqdm.write(x, file=self._wrapped, nolock=nolock)
def tenumerate(iterable, start=0, total=None, tqdm_class=tqdm_auto,
**tqdm_kwargs):
"""
Equivalent of `numpy.ndenumerate` or builtin `enumerate`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
try:
import numpy as np
except ImportError:
pass
else:
if isinstance(iterable, np.ndarray):
return tqdm_class(np.ndenumerate(iterable),
total=total or len(iterable), **tqdm_kwargs)
return enumerate(tqdm_class(iterable, **tqdm_kwargs), start)
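# Note: here `start` is forwarded to the builtin enumerate(), whereas the
# copy above passes it positionally to tqdm_class, where it lands in the
# `desc` slot. A small check of the intended behaviour:
#   list(tenumerate(['a', 'b'], start=5)) -> [(5, 'a'), (6, 'b')]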
def _tzip(iter1, *iter2plus, **tqdm_kwargs):
"""
Equivalent of builtin `zip`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
kwargs = deepcopy(tqdm_kwargs)
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
for i in zip(tqdm_class(iter1, **tqdm_kwargs), *iter2plus):
yield i
def _tmap(function, *sequences, **tqdm_kwargs):
"""
Equivalent of builtin `map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
for i in _tzip(*sequences, **tqdm_kwargs):
yield function(*i)
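# A minimal sketch of the generator pair above (Python 3 semantics, where
# tzip/tmap stay lazy):
#   list(_tmap(lambda a, b: a + b, [1, 2], [10, 20])) -> [11, 22]
#   list(_tzip([1, 2], 'ab')) -> [(1, 'a'), (2, 'b')]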
if sys.version_info[:1] < (3,):
@functools.wraps(_tzip)
def tzip(*args, **kwargs):
return list(_tzip(*args, **kwargs))
@functools.wraps(_tmap)
def tmap(*args, **kwargs):
return list(_tmap(*args, **kwargs))
else:
tzip = _tzip
tmap = _tmap
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-1-fixed/tqdm/tqdm/contrib/__init__.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-1-buggy/tqdm/tqdm/contrib/__init__.py |
tqdm-bug-7 | from ._tqdm import tqdm, TqdmTypeError, TqdmKeyError
from ._version import __version__ # NOQA
import sys
import re
import logging
__all__ = ["main"]
def cast(val, typ):
log = logging.getLogger(__name__)
log.debug((val, typ))
if " or " in typ:
for t in typ.split(" or "):
try:
return cast(val, t)
except TqdmTypeError:
pass
raise TqdmTypeError(val + ' : ' + typ)
# sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n')
if typ == 'bool':
if (val == 'True') or (val == ''):
return True
elif val == 'False':
return False
else:
raise TqdmTypeError(val + ' : ' + typ)
try:
return eval(typ + '("' + val + '")')
except:
if typ == 'chr':
return chr(ord(eval('"' + val + '"')))
else:
raise TqdmTypeError(val + ' : ' + typ)
def posix_pipe(fin, fout, delim='\n', buf_size=256,
callback=lambda int: None # pragma: no cover
):
"""
Params
------
fin : file with `read(buf_size : int)` method
fout : file with `write` (and optionally `flush`) methods.
callback : function(int), e.g.: `tqdm.update`
"""
fp_write = fout.write
# tmp = ''
if not delim:
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
getattr(fout, 'flush', lambda: None)() # pragma: no cover
return
fp_write(tmp)
callback(len(tmp))
# return
buf = ''
# n = 0
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
if buf:
fp_write(buf)
callback(1 + buf.count(delim)) # n += 1 + buf.count(delim)
getattr(fout, 'flush', lambda: None)() # pragma: no cover
return # n
while True:
try:
i = tmp.index(delim)
except ValueError:
buf += tmp
break
else:
fp_write(buf + tmp[:i + len(delim)])
callback(1) # n += 1
buf = ''
tmp = tmp[i + len(delim):]
# ((opt, type), ... )
RE_OPTS = re.compile(r'\n {8}(\S+)\s{2,}:\s*([^,]+)')
# better split method assuming no positional args
RE_SHLEX = re.compile(r'\s*--?([^\s=]+)(?:\s*|=|$)')
# TODO: add custom support for some of the following?
UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file')
# The 8 leading spaces are required for consistency
CLI_EXTRA_DOC = r"""
Extra CLI Options
-----------------
name : type, optional
TODO: find out why this is needed.
delim : chr, optional
Delimiting character [default: '\n']. Use '\0' for null.
N.B.: on Windows systems, Python converts '\n' to '\r\n'.
buf_size : int, optional
String buffer size in bytes [default: 256]
used when `delim` is specified.
bytes : bool, optional
If true, will count bytes, ignore `delim`, and default
`unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'.
log : str, optional
CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET.
"""
def main(fp=sys.stderr):
"""
Parameters (internal use only)
----------
fp : file-like object for tqdm
"""
try:
log = sys.argv.index('--log')
except ValueError:
logLevel = 'INFO'
else:
# sys.argv.pop(log)
# logLevel = sys.argv.pop(log)
logLevel = sys.argv[log + 1]
logging.basicConfig(level=getattr(logging, logLevel))
log = logging.getLogger(__name__)
d = tqdm.__init__.__doc__ + CLI_EXTRA_DOC
opt_types = dict(RE_OPTS.findall(d))
# opt_types['delim'] = 'chr'
for o in UNSUPPORTED_OPTS:
opt_types.pop(o)
log.debug(sorted(opt_types.items()))
# d = RE_OPTS.sub(r' --\1=<\1> : \2', d)
split = RE_OPTS.split(d)
opt_types_desc = zip(split[1::3], split[2::3], split[3::3])
d = ''.join('\n --{0}=<{0}> : {1}{2}'.format(*otd)
for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS)
d = """Usage:
tqdm [--help | options]
Options:
-h, --help Print this help and exit
-v, --version Print version and exit
""" + d.strip('\n') + '\n'
# opts = docopt(d, version=__version__)
if any(v in sys.argv for v in ('-v', '--version')):
sys.stdout.write(__version__ + '\n')
sys.exit(0)
elif any(v in sys.argv for v in ('-h', '--help')):
sys.stdout.write(d + '\n')
sys.exit(0)
argv = RE_SHLEX.split(' '.join(["tqdm"] + sys.argv[1:]))
opts = dict(zip(argv[1::2], argv[2::2]))
log.debug(opts)
opts.pop('log', True)
tqdm_args = {'file': fp}
try:
for (o, v) in opts.items():
try:
tqdm_args[o] = cast(v, opt_types[o])
except KeyError as e:
raise TqdmKeyError(str(e))
log.debug('args:' + str(tqdm_args))
except:
fp.write('\nError:\nUsage:\n tqdm [--help | options]\n')
for i in sys.stdin:
sys.stdout.write(i)
raise
else:
buf_size = tqdm_args.pop('buf_size', 256)
delim = tqdm_args.pop('delim', '\n')
delim_per_char = tqdm_args.pop('bytes', False)
if delim_per_char:
tqdm_args.setdefault('unit', 'B')
tqdm_args.setdefault('unit_scale', True)
tqdm_args.setdefault('unit_divisor', 1024)
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
posix_pipe(sys.stdin, sys.stdout,
'', buf_size, t.update)
elif delim == '\n':
log.debug(tqdm_args)
for i in tqdm(sys.stdin, **tqdm_args):
sys.stdout.write(i)
else:
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
posix_pipe(sys.stdin, sys.stdout,
delim, buf_size, t.update)
from ._tqdm import tqdm, TqdmTypeError, TqdmKeyError
from ._version import __version__ # NOQA
import sys
import re
import logging
__all__ = ["main"]
def cast(val, typ):
log = logging.getLogger(__name__)
log.debug((val, typ))
if " or " in typ:
for t in typ.split(" or "):
try:
return cast(val, t)
except TqdmTypeError:
pass
raise TqdmTypeError(val + ' : ' + typ)
# sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n')
if typ == 'bool':
if (val == 'True') or (val == ''):
return True
elif val == 'False':
return False
else:
raise TqdmTypeError(val + ' : ' + typ)
try:
return eval(typ + '("' + val + '")')
except:
if typ == 'chr':
return chr(ord(eval('"' + val + '"')))
else:
raise TqdmTypeError(val + ' : ' + typ)
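# Worked examples of the casting rules above:
#   cast('5', 'int')            -> 5
#   cast('', 'bool')            -> True   (a bare flag counts as True)
#   cast('0.1', 'float or int') -> 0.1    (first type that parses wins)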
def posix_pipe(fin, fout, delim='\n', buf_size=256,
callback=lambda int: None # pragma: no cover
):
"""
Params
------
fin : file with `read(buf_size : int)` method
fout : file with `write` (and optionally `flush`) methods.
callback : function(int), e.g.: `tqdm.update`
"""
fp_write = fout.write
# tmp = ''
if not delim:
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
getattr(fout, 'flush', lambda: None)() # pragma: no cover
return
fp_write(tmp)
callback(len(tmp))
# return
buf = ''
# n = 0
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
if buf:
fp_write(buf)
callback(1 + buf.count(delim)) # n += 1 + buf.count(delim)
getattr(fout, 'flush', lambda: None)() # pragma: no cover
return # n
while True:
try:
i = tmp.index(delim)
except ValueError:
buf += tmp
break
else:
fp_write(buf + tmp[:i + len(delim)])
callback(1) # n += 1
buf = ''
tmp = tmp[i + len(delim):]
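# A minimal sketch of driving posix_pipe by hand with in-memory streams
# (StringIO is only an example; any file-like pair works):
#   from io import StringIO
#   out = StringIO()
#   posix_pipe(StringIO('a\nb\n'), out, callback=print)  # prints 1, then 1
#   out.getvalue() -> 'a\nb\n'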
# ((opt, type), ... )
RE_OPTS = re.compile(r'\n {8}(\S+)\s{2,}:\s*([^,]+)')
# better split method assuming no positional args
RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(?:\s*|=|$)')
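# The (?<!\S) negative lookbehind added here (compare the copy above) only
# treats dashes at the start of a whitespace-delimited token as options, so
# dashes embedded in values, e.g. --desc=some--text, are not split again.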
# TODO: add custom support for some of the following?
UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file')
# The 8 leading spaces are required for consistency
CLI_EXTRA_DOC = r"""
Extra CLI Options
-----------------
name : type, optional
TODO: find out why this is needed.
delim : chr, optional
Delimiting character [default: '\n']. Use '\0' for null.
N.B.: on Windows systems, Python converts '\n' to '\r\n'.
buf_size : int, optional
String buffer size in bytes [default: 256]
used when `delim` is specified.
bytes : bool, optional
If true, will count bytes, ignore `delim`, and default
`unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'.
log : str, optional
CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET.
"""
def main(fp=sys.stderr):
"""
Parameters (internal use only)
----------
fp : file-like object for tqdm
"""
try:
log = sys.argv.index('--log')
except ValueError:
logLevel = 'INFO'
else:
# sys.argv.pop(log)
# logLevel = sys.argv.pop(log)
logLevel = sys.argv[log + 1]
logging.basicConfig(level=getattr(logging, logLevel))
log = logging.getLogger(__name__)
d = tqdm.__init__.__doc__ + CLI_EXTRA_DOC
opt_types = dict(RE_OPTS.findall(d))
# opt_types['delim'] = 'chr'
for o in UNSUPPORTED_OPTS:
opt_types.pop(o)
log.debug(sorted(opt_types.items()))
# d = RE_OPTS.sub(r' --\1=<\1> : \2', d)
split = RE_OPTS.split(d)
opt_types_desc = zip(split[1::3], split[2::3], split[3::3])
d = ''.join('\n --{0}=<{0}> : {1}{2}'.format(*otd)
for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS)
d = """Usage:
tqdm [--help | options]
Options:
-h, --help Print this help and exit
-v, --version Print version and exit
""" + d.strip('\n') + '\n'
# opts = docopt(d, version=__version__)
if any(v in sys.argv for v in ('-v', '--version')):
sys.stdout.write(__version__ + '\n')
sys.exit(0)
elif any(v in sys.argv for v in ('-h', '--help')):
sys.stdout.write(d + '\n')
sys.exit(0)
argv = RE_SHLEX.split(' '.join(["tqdm"] + sys.argv[1:]))
opts = dict(zip(argv[1::2], argv[2::2]))
log.debug(opts)
opts.pop('log', True)
tqdm_args = {'file': fp}
try:
for (o, v) in opts.items():
try:
tqdm_args[o] = cast(v, opt_types[o])
except KeyError as e:
raise TqdmKeyError(str(e))
log.debug('args:' + str(tqdm_args))
except:
fp.write('\nError:\nUsage:\n tqdm [--help | options]\n')
for i in sys.stdin:
sys.stdout.write(i)
raise
else:
buf_size = tqdm_args.pop('buf_size', 256)
delim = tqdm_args.pop('delim', '\n')
delim_per_char = tqdm_args.pop('bytes', False)
if delim_per_char:
tqdm_args.setdefault('unit', 'B')
tqdm_args.setdefault('unit_scale', True)
tqdm_args.setdefault('unit_divisor', 1024)
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
posix_pipe(sys.stdin, sys.stdout,
'', buf_size, t.update)
elif delim == '\n':
log.debug(tqdm_args)
for i in tqdm(sys.stdin, **tqdm_args):
sys.stdout.write(i)
else:
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
posix_pipe(sys.stdin, sys.stdout,
delim, buf_size, t.update)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-7-fixed/tqdm/tqdm/_main.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-7-buggy/tqdm/tqdm/_main.py |
tqdm-bug-2 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, _is_ascii, FormatReplace, disp_len, disp_trim, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self, *a, **k):
for lock in self.locks:
lock.acquire(*a, **k)
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
BLANK = " "
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(
int(self.frac * N_BARS * nsyms), nsyms)
bar = charset[-1] * bar_length
frac_bar = charset[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
return bar + frac_bar + \
charset[0] * (N_BARS - bar_length - 1)
return bar
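# A worked example of the format-spec grammar documented above:
#   format(Bar(0.3), '10a') -> '###       '  (ASCII charset, width 10)
#   format(Bar(0.3), '-2u') -> width = default_len - 2 = 8, unicode fill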
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
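# e.g. format_num(1e-06) -> '1e-6' (the .3g form, with the zero-padded
# exponent stripped, is shorter), while format_num(1234567) -> '1234567'
# because the scientific form '1.23e+6' is no shorter than the plain one.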
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
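# Worked example of the recurrence: ema(10, mu=4, alpha=0.3)
# -> 0.3*10 + 0.7*4 = 5.8, while ema(10) -> 10 (the first sample seeds mu).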
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
res = bar_format.format(bar=full_bar, **format_dict)
if ncols:
return disp_trim(res, ncols)
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar.BLANK)
res = bar_format.format(bar=full_bar, **format_dict)
if ncols:
return disp_trim(res, ncols)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
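# e.g. if live bars occupy positions {0, 2}, this returns 1: the smallest
# slot in range(len(positions) + 1) not already taken.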
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.clear(nolock=True)
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import DataFrameGroupBy, \
SeriesGroupBy # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
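# Illustrative sketch (not part of tqdm itself) of what the above
# registration enables; `df` and `s` are hypothetical pandas objects:
# >>> tqdm.pandas(desc="my bar")
# >>> s = pd.Series(range(1000))
# >>> s.progress_map(lambda x: x + 1)        # Series.progress_map
# >>> df.groupby(0).progress_aggregate(sum)  # GroupBy.progress_aggregate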
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, lock_args=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0).
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.lock_args = lock_args
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
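# Hedged usage sketch: manual mode (no iterable) relies on the
# attributes initialised above; all names below are illustrative only.
# >>> pbar = tqdm(total=300, desc="copying", unit="B", unit_scale=True)
# >>> pbar.update(100)  # internal counter n -> 100
# >>> pbar.close()      # always close manual-mode bars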
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
if not hasattr(self, 'sp'):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of"
" `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
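# Worked example (assumed numbers) of the dynamic_miniters rule of 3
# above: if delta_it=1000 iterations took delta_t=2s and
# mininterval=0.1s, then miniters = 1000 * 0.1 / 2 = 50, i.e. roughly
# one display update every 0.1s at the observed rate.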
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
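# Hedged sketch of the n < 0 branch above: rewinding the bar keeps
# last_print_n in step so the auto-refresh check still triggers.
# >>> t = tqdm(total=10)
# >>> t.update(5)   # n == 5
# >>> t.update(-2)  # n == 3; last_print_n shifted by -2 as well
# >>> t.close()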
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=0)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
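# Hedged sketch: exclude time spent blocked on user input from the
# rate/ETA estimate by shifting start_t (which is what unpause() does).
# >>> t = tqdm(total=100)
# >>> answer = input("continue? ")  # timer keeps running here...
# >>> t.unpause()                   # ...so discount the pause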
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
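# Hedged sketch: reuse one bar across epochs instead of recreating it.
# >>> pbar = tqdm(total=100, leave=True)
# >>> for epoch in range(3):
# ...     pbar.reset(total=100)  # back to 0 iterations
# ...     # ... call update(n) as work progresses ...
# >>> pbar.close()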
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
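# Hedged sketch: numbers are shortened via format_num, other values
# coerced to str, then joined as "key=value" pairs (see above).
# >>> t = tqdm(total=10)
# >>> t.set_postfix(loss=0.12345, stage="warmup")
# >>> # bar now ends with "loss=0.123, stage=warmup"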
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with tclass(total=total, **tkwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
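# Hedged sketch mirroring the read example above, for "write"; file
# names are illustrative and `getsize` assumes `from os.path import getsize`:
# >>> with open("out.bin", "wb") as fdst, open("in.bin", "rb") as fsrc:
# ...     with tqdm.wrapattr(fdst, "write", total=getsize("in.bin")) as out:
# ...         while True:
# ...             chunk = fsrc.read(1024)
# ...             if not chunk:
# ...                 break
# ...             out.write(chunk)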
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
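# Minimal runnable sketch (illustrative; not part of the module):
# >>> from time import sleep
# >>> for i in trange(100, desc='demo', unit='step'):
# ...     sleep(0.01)  # simulate work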
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, _is_ascii, FormatReplace, disp_len, disp_trim, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars (works only if `fork` is available: Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self, *a, **k):
for lock in self.locks:
lock.acquire(*a, **k)
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
BLANK = "  "  # two chars, so `nsyms >= 1` in __format__ below
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(
int(self.frac * N_BARS * nsyms), nsyms)
bar = charset[-1] * bar_length
frac_bar = charset[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
return bar + frac_bar + \
charset[0] * (N_BARS - bar_length - 1)
return bar
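# Hedged doctest-style sketch of the `[width][type]` specifiers parsed
# above:
# >>> '{0:10a}'.format(Bar(0.5))  # 10 cells, ASCII charset
# '#####     '
# >>> '{0:5a}'.format(Bar(0.3))   # 5 cells; '5' marks the 0.5-cell frac
# '#5   '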
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number (>= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
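# Hedged doctest-style examples of the cascading thresholds above:
# >>> tqdm.format_sizeof(999)      # < 999.5: no prefix
# '999'
# >>> tqdm.format_sizeof(123456)   # one division by 1000
# '123k'
# >>> tqdm.format_sizeof(1234, 'B', divisor=1024)
# '1.21kB'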
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
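# Hedged doctest-style examples:
# >>> tqdm.format_interval(75)
# '01:15'
# >>> tqdm.format_interval(3661)
# '1:01:01'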
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
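# Hedged doctest-style examples: the shorter of '{:.3g}' and str(n)
# wins (see above).
# >>> tqdm.format_num(1 / 3)
# '0.333'
# >>> tqdm.format_num(1e-06)
# '1e-6'
# >>> tqdm.format_num(42)
# '42'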
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
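# Hedged doctest-style examples of the recurrence above:
# >>> tqdm.ema(10)                   # no previous value: returns x
# 10
# >>> tqdm.ema(10, mu=0, alpha=0.3)  # 0.3*10 + 0.7*0
# 3.0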
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will, if possible, be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup workaround
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar.BLANK)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
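# Hedged sketch: format_meter is a pure function of its arguments, so
# it can be previewed without constructing a bar (output below is only
# indicative; the bar glyphs and width depend on charset and ncols):
# >>> tqdm.format_meter(n=50, total=100, elapsed=10, ncols=60, ascii=True)
# e.g. ' 50%|#####     | 50/100 [00:10<00:10,  5.00it/s]'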
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.clear(nolock=True)
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
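# Hedged sketch (assumes `trange`/`tqdm` are importable): use
# tqdm.write() for messages during a loop so they do not mangle active
# bars (bars are cleared, then refreshed).
# >>> for i in trange(10):
# ...     if i == 5:
# ...         tqdm.write("halfway there")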
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
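# Hedged sketch (multiprocessing on platforms with fork): share the
# global lock with workers so concurrent bars do not garble output.
# >>> from multiprocessing import Pool
# >>> p = Pool(4, initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))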
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import DataFrameGroupBy, \
SeriesGroupBy # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[1], i.e. one call per column
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update the bar correctly:
# `pandas.apply` calls `func` twice on the first column/row
# to decide whether it can take a fast or slow code path,
# so stop counting once t.n reaches t.total
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function to the df using our wrapper
# (which updates the bar on each call)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, lock_args=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0).
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.lock_args = lock_args
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
if not hasattr(self, 'sp'):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of"
" `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=0)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
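    # Minimal reuse sketch (the epoch loop is an assumption, not tqdm API):
    #
    # >>> with tqdm(total=10, leave=True) as t:
    # ...     for epoch in range(3):
    # ...         for _ in range(10):
    # ...             t.update()
    # ...         t.reset(total=10)  # back to 0/10 for the next epoch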
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
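    # For example, with two stacked bars, `moveto(1)` writes one '\n' to step
    # down to the lower bar and `moveto(-1)` writes one terminal move-up
    # escape to come back; the net cursor movement is zero, which is what
    # keeps multiple bars from clobbering each other.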
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with tclass(total=total, **tkwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
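    # Complementary "write" sketch (`src_size` and `chunks` are hypothetical):
    #
    # >>> with open("out.bin", "wb") as fdst, \
    # ...         tqdm.wrapattr(fdst, "write", total=src_size) as fobj:
    # ...     for chunk in chunks:
    # ...         fobj.write(chunk)  # the bar advances by len(chunk)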
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-2-fixed/tqdm/tqdm/std.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-2-buggy/tqdm/tqdm/std.py |
tqdm-bug-4 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, RE_ANSI
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
        # Create global parallelism locks to avoid racing issues with
        # parallel bars; works only if fork is available
        # (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
cls.mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
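    # Worked examples of the thresholds above (derived from this code):
    #
    # >>> tqdm.format_sizeof(999)      # < 999.5: printed unscaled
    # '999'
    # >>> tqdm.format_sizeof(123456)   # scaled once by `divisor`
    # '123k'
    # >>> tqdm.format_sizeof(1234567)  # scaled twice
    # '1.23M'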
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
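    # Worked examples (straight from the divmod arithmetic above):
    #
    # >>> tqdm.format_interval(125)    # 2 min 5 s, hours omitted
    # '02:05'
    # >>> tqdm.format_interval(3661)   # 1 h 1 min 1 s
    # '1:01:01'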
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
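    # Example: the '.3g' form is only kept when it is actually shorter.
    #
    # >>> tqdm.format_num(0.123456)    # '0.123' (5 chars) beats '0.123456'
    # '0.123'
    # >>> tqdm.format_num(100)         # '100' is already minimal
    # '100'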
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
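    # Worked example: with the default alpha of 0.3,
    # ema(10, mu=0) == 0.3 * 10 + 0.7 * 0 == 3.0,
    # while ema(10) == 10 since a missing `mu` seeds the average with `x`.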
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
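    # The returned `print_status` rewrites the current line in place: each
    # call starts with '\r' and pads with spaces so a shorter status fully
    # erases a longer one, e.g. print_status('12/100') followed by
    # print_status('done') leaves no stray '00' on screen.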
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (None),
            only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s,
remaining, remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
            if total:  # total may be None (unknown); only scale when set
                total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining = (total - n) / rate if rate else 0
remaining_str = format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
percentage=percentage,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
desc=prefix or '', postfix=postfix, unit=unit,
# bar=full_bar, # replaced by procedure below
**extra_kwargs)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**format_dict)
r_bar = r_bar_user.format(**format_dict)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**format_dict)
# Formatting progress bar space available for bar's display
if ncols:
N_BARS = max(1, ncols - len(RE_ANSI.sub('', l_bar + r_bar)))
else:
N_BARS = 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
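    # Quick sketch of both branches (exact strings depend on the defaults
    # above; ascii=True is passed here just to keep the sample output plain):
    #
    # >>> tqdm.format_meter(20, 100, 10, ascii=True)   # total known
    # ' 20%|##        | 20/100 [00:10<00:40,  2.00it/s]'
    # >>> tqdm.format_meter(20, None, 10)              # total unknown
    # '20it [00:10,  2.00it/s]'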
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
# Construct the lock if it does not exist
with cls.get_lock():
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
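    # Typical multiprocessing sketch (the Pool wiring is an assumption, not
    # tqdm API beyond set_lock/get_lock): hand the parent's lock to workers
    # so their bars do not interleave writes.
    #
    # >>> from multiprocessing import Pool
    # >>> p = Pool(4, initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))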
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
                        # when axis=0, total is shape[1] (column count)
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s, remaining,
remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
from textwrap import dedent
raise (TqdmDeprecationWarning(dedent("""\
`nested` is deprecated and automated.
Use `position` instead for manual control.
"""), fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
self.display()
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
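    # Manual-update sketch mirroring the parameters documented above
    # (`total_bytes` and `blocks` are hypothetical):
    #
    # >>> with tqdm(total=total_bytes, unit='B', unit_scale=True,
    # ...           unit_divisor=1024) as t:
    # ...     for block in blocks:
    # ...         t.update(len(block))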
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
@property
def format_dict(self):
"""Public API for read-only member access"""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
if not hasattr(self, 'sp'):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=pos)
if not max([abs(getattr(i, "pos", 0))
for i in self._instances] + [pos]):
# only if not nested (#477)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
def display(self, msg=None, pos=None):
"""
        Use `self.sp` to display `msg` at the specified `pos`.
Parameters
----------
msg : what to display (default: repr(self))
pos : position to display in. (default: abs(self.pos))
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, \
Comparable, RE_ANSI
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
        # Create global parallelism locks to avoid racing issues with
        # parallel bars; works only if fork is available
        # (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
cls.mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (None),
            only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s,
remaining, remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining = (total - n) / rate if rate else 0
remaining_str = format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
percentage=percentage,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
desc=prefix or '', postfix=postfix, unit=unit,
# bar=full_bar, # replaced by procedure below
**extra_kwargs)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**format_dict)
r_bar = r_bar_user.format(**format_dict)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**format_dict)
# Formatting progress bar space available for bar's display
if ncols:
N_BARS = max(1, ncols - len(RE_ANSI.sub('', l_bar + r_bar)))
else:
N_BARS = 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
# Construct the lock if it does not exist
with cls.get_lock():
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy, GroupBy, PanelGroupBy
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
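    # Follow-up sketch to the docstring example above (data assumed): once
    # `tqdm.pandas()` has patched pandas, plain Series also gain
    # `progress_map` alongside `progress_apply`:
    # >>> s = pd.Series(range(100000))
    # >>> s.progress_map(lambda x: x * 2)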
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
            specify an initial arbitrarily large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s, remaining,
remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
from textwrap import dedent
raise (TqdmDeprecationWarning(dedent("""\
`nested` is deprecated and automated.
Use `position` instead for manual control.
"""), fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
self.display()
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
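    # Sketch of the `position` bookkeeping above: auto-assigned slots are
    # stored as positive `pos`, while user-fixed offsets are negated so
    # `_decr_instances` never reshuffles them. Illustrative only:
    # >>> outer = tqdm(total=10, position=0)  # fixed: self.pos == 0
    # >>> inner = tqdm(total=10, position=1)  # fixed: self.pos == -1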
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
@property
def format_dict(self):
"""Public API for read-only member access"""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
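    # Sketch: `format_dict` is the read-only snapshot consumed by
    # `__repr__`; a custom frontend could poll it instead of parsing the
    # rendered bar text (values below assumed):
    # >>> with tqdm(total=100) as t:
    # ...     t.update(30)
    # ...     t.format_dict['n'], t.format_dict['total']  # -> (30, 100)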
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
if not hasattr(self, 'sp'):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
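    # Worked example of the dynamic `miniters` "rule of 3" above (numbers
    # assumed): if delta_it=10000 iterations elapsed in delta_t=20s
    # (>= maxinterval) and mininterval=0.1, then
    #     miniters = delta_it * mininterval / delta_t = 10000 * 0.1 / 20 = 50
    # i.e. roughly one display refresh every `mininterval` at that rate.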
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
from textwrap import dedent
raise TqdmDeprecationWarning(dedent("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
"""), fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
self.display()
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
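    # Worked example of the smoothing above, assuming `ema` is the usual
    # exponential moving average alpha*x + (1-alpha)*mu: with smoothing=0.3,
    # a previous avg_time of 0.010 s/it and a fresh 0.020 s/it sample give
    #     avg_time = 0.3 * 0.020 + 0.7 * 0.010 = 0.013 s/it
    # so the displayed rate tracks bursts without jumping straight to them.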
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=pos)
if not max([abs(getattr(i, "pos", 0))
for i in self._instances] + [pos]):
# only if not nested (#477)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
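    # Descriptive note on the method above: moveto(2) writes two '\n'
    # (cursor down two rows) while moveto(-2) writes the terminal's
    # cursor-up sequence twice, so clear()/display() can visit a bar's row
    # and then return the cursor to its home row.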
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
def display(self, msg=None, pos=None):
"""
        Use `self.sp` to display `msg` in the specified `pos`.
Parameters
----------
msg : what to display (default: repr(self))
pos : position to display in. (default: abs(self.pos))
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-4-fixed/tqdm/tqdm/_tqdm.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-4-buggy/tqdm/tqdm/_tqdm.py |
tqdm-bug-8 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet
import sys
from time import time
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange']
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
@staticmethod
def format_sizeof(num, suffix=''):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= 1000.0
return '{0:3.1f}Y'.format(num) + suffix
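    # Illustrative doctests for the thresholds above (values assumed):
    # >>> tqdm.format_sizeof(999)
    # '999'
    # >>> tqdm.format_sizeof(12345)
    # '12.3K'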
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
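    # Illustrative doctests (values assumed):
    # >>> tqdm.format_interval(95)
    # '01:35'
    # >>> tqdm.format_interval(3661)
    # '1:01:01'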
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
if not getattr(fp, 'flush', False): # pragma: no cover
fp.flush = lambda: None
def fp_write(s):
fp.write(_unicode(s))
last_printed_len = [0] # closure over mutable variable (fast)
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
fp.flush()
last_printed_len[0] = len_s
return print_status
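    # Sketch of the closure above: every call rewrites the same terminal
    # line, padding with blanks so a shorter status fully covers the
    # previous, longer one. Illustrative only:
    # >>> print_status = tqdm.status_printer(sys.stderr)
    # >>> print_status('50%|#####     |')
    # >>> print_status('done')  # trailing blanks erase leftover characters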
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='',
ascii=False, unit='it', unit_scale=False, rate=None,
bar_format=None):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (e.g. None), only
basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool, optional
            If set, the number of iterations will be printed with an
appropriate SI metric prefix (K = 10^3, M = 10^6, etc.)
[default: False].
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if (rate and (rate < 1)) else None
format_sizeof = tqdm.format_sizeof
rate_fmt = ((format_sizeof(inv_rate if inv_rate else rate)
if unit_scale else
'{0:5.2f}'.format(inv_rate if inv_rate else rate))
if rate else '?') \
+ ('s' if inv_rate else unit) + '/' + (unit if inv_rate else 's')
if unit_scale:
n_fmt = format_sizeof(n)
total_fmt = format_sizeof(total) if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
l_bar = (prefix if prefix else '') + \
'{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': rate if inv_rate is None else inv_rate,
'rate_noinv': rate,
'rate_noinv_fmt': ((format_sizeof(rate)
if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + 'it/s',
'rate_fmt': rate_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix if prefix else '',
# 'bar': full_bar # replaced by procedure below
}
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar, r_bar = l_bar.format(**bar_args), r_bar.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
n_fmt, unit, elapsed_str, rate_fmt)
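    # Hand-worked example of the meter above (numbers assumed, output
    # reconstructed from the formatting rules): n=20, total=100, elapsed=12
    # gives rate = 20/12 ~ 1.67it/s and remaining = 80/rate = 48s, so:
    # >>> tqdm.format_meter(20, 100, 12, ascii=True)
    # ' 20%|##        | 20/100 [00:12<00:48,  1.67it/s]'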
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
cls._instances.add(instance)
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
""" Skips specified instance """
try:
return max(inst.pos for inst in cls._instances
if inst is not instance) + 1
except ValueError as e:
if "arg is an empty sequence" in str(e):
return 0
raise # pragma: no cover
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
try: # in case instance was explicitly positioned, it won't be in set
cls._instances.remove(instance)
for inst in cls._instances:
if inst.pos > instance.pos:
inst.pos -= 1
except KeyError:
pass
@classmethod
def write(cls, s, file=sys.stdout, end="\n"):
"""
Print a message via tqdm (without overlap with bars)
"""
# Clear all bars
inst_cleared = []
for inst in cls._instances:
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == file or \
(file in [sys.stdout, sys.stderr] and
inst.fp in [sys.stdout, sys.stderr]):
inst.clear()
inst_cleared.append(inst)
# Write the message
file.write(s)
file.write(end)
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh()
# TODO: make list of all instances incl. absolutely positioned ones?
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=sys.stderr, ncols=None, mininterval=0.1,
maxinterval=10.0, miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, dynamic_ncols=False,
smoothing=0.3, bar_format=None, initial=0, position=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
            specify an initial arbitrarily large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress update interval, in seconds [default: 10.0].
miniters : int, optional
Minimum progress update interval, in iterations.
            If specified, disables dynamic adjustment of `miniters`.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (DeprecationWarning("nested is deprecated and"
" automated.\nUse position instead"
" for manual control")
if "nested" in kwargs else
Warning("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols:
if dynamic_ncols: # pragma: no cover
dynamic_ncols = _environ_cols_wrapper()
ncols = dynamic_ncols(file)
else: # pragma: no cover
ncols = _environ_cols_wrapper()(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ': ' if desc else ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
self.pos = self._get_free_pos(self) if position is None else position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if self.pos:
self.moveto(self.pos)
self.sp(self.format_meter(self.n, total, 0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc, ascii, unit, unit_scale, None, bar_format))
if self.pos:
self.moveto(-self.pos)
# Init the time counter
self.start_t = self.last_print_t = self._time()
def __len__(self):
return len(self.iterable) if self.iterable else self.total
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(self.n, self.total,
time() - self.last_print_t,
self.ncols, self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time
if self.avg_time else None, self.bar_format)
def __lt__(self, other):
# try:
return self.pos < other.pos
# except AttributeError:
# return self.start_t < other.start_t
def __le__(self, other):
return (self < other) or (self == other)
def __eq__(self, other):
# try:
return self.pos == other.pos
# except AttributeError:
# return self.start_t == other.start_t
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
def __hash__(self):
return id(self)
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
_time = self._time
format_meter = self.format_meter
try:
sp = self.sp
except AttributeError:
raise DeprecationWarning('Please use tqdm_gui(...)'
' instead of tqdm(..., gui=True)')
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check the counter first (avoid calls to time())
if n - last_print_n >= miniters:
delta_it = n - last_print_n
cur_t = _time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
if self.pos:
self.moveto(self.pos)
# Printing the bar's update
sp(format_meter(
n, self.total, elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols
else ncols),
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = self._time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise DeprecationWarning('Please use tqdm_gui(...)'
' instead of tqdm(..., gui=True)')
if self.pos:
self.moveto(self.pos)
# Print bar's update
self.sp(self.format_meter(
self.n, self.total, elapsed,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = self.pos
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
cur_t = self._time()
# stats for overall rate (no weighted average)
self.sp(self.format_meter(
self.n, self.total, cur_t - self.start_t,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale, None,
self.bar_format))
if pos:
self.moveto(-pos)
else:
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None):
"""
Set/modify description of the progress bar.
"""
self.desc = desc + ': ' if desc else ''
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
def clear(self, nomove=False):
"""
Clear current bar display
"""
if not nomove:
self.moveto(self.pos)
# clear up the bar (can't rely on sp(''))
self.fp.write('\r')
self.fp.write(' ' * (self.ncols if self.ncols else 10))
self.fp.write('\r') # place cursor back at the beginning of line
if not nomove:
self.moveto(-self.pos)
def refresh(self):
"""
Force refresh the display of this bar
"""
self.moveto(self.pos)
# clear up this line's content (whatever there was)
self.clear(nomove=True)
# Print current/last bar state
self.fp.write(self.__repr__())
self.moveto(-self.pos)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet
import sys
from time import time
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange']
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
@staticmethod
def format_sizeof(num, suffix=''):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= 1000.0
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
if not getattr(fp, 'flush', False): # pragma: no cover
fp.flush = lambda: None
def fp_write(s):
fp.write(_unicode(s))
last_printed_len = [0] # closure over mutable variable (fast)
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
fp.flush()
last_printed_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='',
ascii=False, unit='it', unit_scale=False, rate=None,
bar_format=None):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (e.g. None), only
basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool, optional
            If set, the number of iterations will be printed with an
appropriate SI metric prefix (K = 10^3, M = 10^6, etc.)
[default: False].
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if (rate and (rate < 1)) else None
format_sizeof = tqdm.format_sizeof
rate_fmt = ((format_sizeof(inv_rate if inv_rate else rate)
if unit_scale else
'{0:5.2f}'.format(inv_rate if inv_rate else rate))
if rate else '?') \
+ ('s' if inv_rate else unit) + '/' + (unit if inv_rate else 's')
if unit_scale:
n_fmt = format_sizeof(n)
total_fmt = format_sizeof(total) if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
l_bar = (prefix if prefix else '') + \
'{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': rate if inv_rate is None else inv_rate,
'rate_noinv': rate,
'rate_noinv_fmt': ((format_sizeof(rate)
if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + 'it/s',
'rate_fmt': rate_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix if prefix else '',
# 'bar': full_bar # replaced by procedure below
}
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar, r_bar = l_bar_user.format(**bar_args), r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
n_fmt, unit, elapsed_str, rate_fmt)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
cls._instances.add(instance)
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
""" Skips specified instance """
try:
return max(inst.pos for inst in cls._instances
if inst is not instance) + 1
except ValueError as e:
if "arg is an empty sequence" in str(e):
return 0
raise # pragma: no cover
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
try: # in case instance was explicitly positioned, it won't be in set
cls._instances.remove(instance)
for inst in cls._instances:
if inst.pos > instance.pos:
inst.pos -= 1
except KeyError:
pass
@classmethod
def write(cls, s, file=sys.stdout, end="\n"):
"""
Print a message via tqdm (without overlap with bars)
"""
# Clear all bars
inst_cleared = []
for inst in cls._instances:
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == file or \
(file in [sys.stdout, sys.stderr] and
inst.fp in [sys.stdout, sys.stderr]):
inst.clear()
inst_cleared.append(inst)
# Write the message
file.write(s)
file.write(end)
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh()
# TODO: make list of all instances incl. absolutely positioned ones?
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=sys.stderr, ncols=None, mininterval=0.1,
maxinterval=10.0, miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, dynamic_ncols=False,
smoothing=0.3, bar_format=None, initial=0, position=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
            specify an initial arbitrarily large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress update interval, in seconds [default: 10.0].
miniters : int, optional
Minimum progress update interval, in iterations.
            If specified, disables dynamic adjustment of `miniters`.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (DeprecationWarning("nested is deprecated and"
" automated.\nUse position instead"
" for manual control")
if "nested" in kwargs else
Warning("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols:
if dynamic_ncols: # pragma: no cover
dynamic_ncols = _environ_cols_wrapper()
ncols = dynamic_ncols(file)
else: # pragma: no cover
ncols = _environ_cols_wrapper()(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ': ' if desc else ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
self.pos = self._get_free_pos(self) if position is None else position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if self.pos:
self.moveto(self.pos)
self.sp(self.format_meter(self.n, total, 0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc, ascii, unit, unit_scale, None, bar_format))
if self.pos:
self.moveto(-self.pos)
# Init the time counter
self.start_t = self.last_print_t = self._time()
def __len__(self):
return len(self.iterable) if self.iterable else self.total
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(self.n, self.total,
time() - self.last_print_t,
self.ncols, self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time
if self.avg_time else None, self.bar_format)
def __lt__(self, other):
# try:
return self.pos < other.pos
# except AttributeError:
# return self.start_t < other.start_t
def __le__(self, other):
return (self < other) or (self == other)
def __eq__(self, other):
# try:
return self.pos == other.pos
# except AttributeError:
# return self.start_t == other.start_t
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
def __hash__(self):
return id(self)
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
_time = self._time
format_meter = self.format_meter
try:
sp = self.sp
except AttributeError:
raise DeprecationWarning('Please use tqdm_gui(...)'
' instead of tqdm(..., gui=True)')
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check the counter first (avoid calls to time())
if n - last_print_n >= miniters:
delta_it = n - last_print_n
cur_t = _time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
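                            # i.e. an exponential moving average of seconds
                            # per iteration: with smoothing=0.3, a fresh
                            # sample of 1.0 s/it and avg_time=2.0 s/it gives
                            # 0.3*1.0 + 0.7*2.0 = 1.7 s/it.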
if self.pos:
self.moveto(self.pos)
# Printing the bar's update
sp(format_meter(
n, self.total, elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols
else ncols),
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
        delta_it = self.n - self.last_print_n  # iterations since last print
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = self._time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise DeprecationWarning('Please use tqdm_gui(...)'
' instead of tqdm(..., gui=True)')
if self.pos:
self.moveto(self.pos)
# Print bar's update
self.sp(self.format_meter(
self.n, self.total, elapsed,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
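    # Illustrative sketch (not in the original source): manual updates for a
    # stream of known size, where `path` is a hypothetical binary file:
    #
    #     import os
    #     with open(path, 'rb') as f, \
    #             tqdm(total=os.path.getsize(path), unit='B',
    #                  unit_scale=True) as t:
    #         for chunk in iter(lambda: f.read(4096), b''):
    #             t.update(len(chunk))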
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = self.pos
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
cur_t = self._time()
# stats for overall rate (no weighted average)
self.sp(self.format_meter(
self.n, self.total, cur_t - self.start_t,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale, None,
self.bar_format))
if pos:
self.moveto(-pos)
else:
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None):
"""
Set/modify description of the progress bar.
"""
self.desc = desc + ': ' if desc else ''
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
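        # For n > 0 this writes n newlines (cursor moves down n lines); for
        # n < 0, '\n' * n is empty and _term_move_up() * -n emits -n ANSI
        # "cursor up" sequences, so moveto(p) and moveto(-p) bracket a print
        # at line offset p and then restore the cursor.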
def clear(self, nomove=False):
"""
Clear current bar display
"""
if not nomove:
self.moveto(self.pos)
# clear up the bar (can't rely on sp(''))
self.fp.write('\r')
self.fp.write(' ' * (self.ncols if self.ncols else 10))
self.fp.write('\r') # place cursor back at the beginning of line
if not nomove:
self.moveto(-self.pos)
def refresh(self):
"""
Force refresh the display of this bar
"""
self.moveto(self.pos)
# clear up this line's content (whatever there was)
self.clear(nomove=True)
# Print current/last bar state
self.fp.write(self.__repr__())
self.moveto(-self.pos)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
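# --- Illustrative usage (added sketch; not part of the original file) ---
# Runs only when this module is executed directly, so imports are unaffected.
if __name__ == '__main__':  # pragma: no cover
    from time import sleep
    # Wrap an iterable: the common `for x in tqdm(...)` pattern.
    for _ in trange(10, desc='demo'):
        sleep(0.01)
    # Manual updates via update()/close(), e.g. for streams of known size.
    with tqdm(total=100, unit='B', unit_scale=True) as pbar:
        for _ in range(10):
            sleep(0.01)
            pbar.update(10)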
| BugsInPy/BugsInPy/temp/projects/tqdm/bug-8-fixed/tqdm/tqdm/_tqdm.py,BugsInPy/BugsInPy/temp/projects/tqdm/bug-8-buggy/tqdm/tqdm/_tqdm.py |
tornado-bug-5 | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop.
Typical applications will use a single `IOLoop` object, accessed via
`IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
In addition to I/O events, the `IOLoop` can also schedule time-based
events. `IOLoop.add_timeout` is a non-blocking alternative to
`time.sleep`.
"""
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import (
PY3, Configurable, errno_from_exception, timedelta_to_seconds,
TimeoutError, unicode_type, import_object,
)
try:
import signal
except ImportError:
signal = None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
if PY3:
import _thread as thread
else:
import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0
class IOLoop(Configurable):
"""A level-triggered I/O loop.
On Python 3, `IOLoop` is a wrapper around the `asyncio` event
loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD
and Mac OS X) if they are available, or else we fall back on
select(). If you are implementing a system that needs to handle
thousands of simultaneous connections, you should use a system
that supports either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import socket
import tornado.ioloop
from tornado import gen
from tornado.iostream import IOStream
@gen.coroutine
def handle_connection(connection, address):
stream = IOStream(connection)
message = yield stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# In Python 2, _current.instance points to the current IOLoop.
_current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict()
@classmethod
def configure(cls, impl, **kwargs):
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available")
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance():
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
def install(self):
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance():
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
"""
if asyncio is None:
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
else:
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True)
else:
current = None
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
"""
# The asyncio event loops override this method.
assert asyncio is None
old = getattr(IOLoop._current, "instance", None)
if old is not None:
old.clear_current()
IOLoop._current.instance = self
@staticmethod
def clear_current():
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self):
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
return PollIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
            ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = Future()
future_set_exc_info(future_cell[0], sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = Future()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback():
# If we can cancel the future, do so and wait on it. If not,
                # just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
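    # Illustrative sketch (not in the original source): driving a coroutine
    # from synchronous code with a deadline, assuming tornado.gen:
    #
    #     from tornado import gen
    #
    #     @gen.coroutine
    #     def main():
    #         yield gen.sleep(0.1)
    #         raise gen.Return(42)
    #
    #     result = IOLoop.current().run_sync(main, timeout=5)  # -> 42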
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
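    # Illustrative note (not in the original source): the scheduling helpers
    # express the same deadline in different ways; with `cb` a hypothetical
    # zero-argument callable:
    #
    #     io_loop.add_timeout(io_loop.time() + 2, cb)             # absolute
    #     io_loop.add_timeout(datetime.timedelta(seconds=2), cb)  # relative
    #     io_loop.call_later(2, cb)                               # relative
    #     io_loop.call_at(io_loop.time() + 2, cb)                 # absolute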
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future_add_done_callback(
future, lambda future: self.add_callback(callback, future))
def run_in_executor(self, executor, func, *args):
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
"""
if ThreadPoolExecutor is None:
raise RuntimeError(
"concurrent.futures is required to use IOLoop.run_in_executor")
if executor is None:
if not hasattr(self, '_executor'):
from tornado.process import cpu_count
self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future()
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
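    # Illustrative sketch (not in the original source): offloading a blocking
    # call from a coroutine, where `blocking_fetch` is a hypothetical
    # synchronous function:
    #
    #     @gen.coroutine
    #     def handler():
    #         result = yield IOLoop.current().run_in_executor(
    #             None, blocking_fetch, 'http://example.com')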
def set_default_executor(self, executor):
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
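    # Illustrative note (not in the original source): split_fd() normalizes
    # both accepted forms, e.g. split_fd(7) -> (7, 7), while for a socket
    # object `s`, split_fd(s) -> (s.fileno(), s); close_fd() mirrors this by
    # preferring the object's own close().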
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = collections.deque()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in list(self._handlers.values()):
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
if hasattr(self, '_executor'):
self._executor.shutdown()
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = IOLoop.current(instance=False)
if old_current is not self:
self.make_current()
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
ncallbacks = len(self._callbacks)
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for i in range(ncallbacks):
self._run_callback(self._callbacks.popleft())
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
if old_current is None:
IOLoop.clear_current()
elif old_current is not self:
old_current.make_current()
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if self._closing:
return
# Blindly insert into self._callbacks. This is safe even
# from signal handlers because deque.append is atomic.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if thread.get_ident() != self._thread_ident:
# This will write one byte but Waker.consume() reads many
# at once, so it's ok to write even when not strictly
# necessary.
self._waker.wake()
else:
# If we're on the IOLoop's thread, we don't need to wake anyone.
pass
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
    # Comparison methods to sort by deadline, with the per-loop insertion
    # counter as a tiebreaker to guarantee a consistent ordering. The
    # heapq module uses __le__ in python2.5, and __lt__ in 2.6+ (sort()
    # and most other comparisons use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
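    # Illustrative note (not in the original source): since tdeadline is a
    # (deadline, counter) tuple, timeouts sharing a deadline compare in
    # insertion order, e.g. (5.0, 1) < (5.0, 2) < (6.0, 0), keeping the
    # heap's ordering deterministic.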
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
        # Looking up the IOLoop here allows one to first instantiate the
        # PeriodicCallback in another thread, then start it using
        # IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
def _update_next(self, current_time):
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
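# --- Illustrative usage (added sketch; not part of the original file) ---
# Print a heartbeat every 100 ms and stop after five ticks; runs only when
# this module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    loop = IOLoop.current()
    ticks = [0]
    def _beat():
        ticks[0] += 1
        print('tick %d' % ticks[0])
        if ticks[0] >= 5:
            pc.stop()
            loop.stop()
    pc = PeriodicCallback(_beat, 100)  # callback_time is in milliseconds
    pc.start()
    loop.start()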
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop.
Typical applications will use a single `IOLoop` object, accessed via
`IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
In addition to I/O events, the `IOLoop` can also schedule time-based
events. `IOLoop.add_timeout` is a non-blocking alternative to
`time.sleep`.
"""
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import (
PY3, Configurable, errno_from_exception, timedelta_to_seconds,
TimeoutError, unicode_type, import_object,
)
try:
import signal
except ImportError:
signal = None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
if PY3:
import _thread as thread
else:
import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0
class IOLoop(Configurable):
"""A level-triggered I/O loop.
On Python 3, `IOLoop` is a wrapper around the `asyncio` event
loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD
and Mac OS X) if they are available, or else we fall back on
select(). If you are implementing a system that needs to handle
thousands of simultaneous connections, you should use a system
that supports either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import socket
import tornado.ioloop
from tornado import gen
from tornado.iostream import IOStream
@gen.coroutine
def handle_connection(connection, address):
stream = IOStream(connection)
message = yield stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# In Python 2, _current.instance points to the current IOLoop.
_current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict()
@classmethod
def configure(cls, impl, **kwargs):
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available")
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance():
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
def install(self):
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance():
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
"""
if asyncio is None:
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
else:
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True)
else:
current = None
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
"""
# The asyncio event loops override this method.
assert asyncio is None
old = getattr(IOLoop._current, "instance", None)
if old is not None:
old.clear_current()
IOLoop._current.instance = self
@staticmethod
def clear_current():
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self):
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
return PollIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
        The ``events`` argument is a bitwise OR of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
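        A minimal sketch (names here are illustrative; assumes a
        non-blocking listening socket)::
            import socket
            from tornado.ioloop import IOLoop
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setblocking(False)
            sock.bind(("127.0.0.1", 8888))
            sock.listen(128)
            def accept_handler(fd, events):
                # READ on a listening socket means a connection is ready.
                connection, address = sock.accept()
                connection.close()
            IOLoop.current().add_handler(sock, accept_handler, IOLoop.READ)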
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
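        A sketch of the ``timeout`` form (assuming the ``main`` coroutine
        above and ``from tornado.util import TimeoutError``)::
            try:
                IOLoop.current().run_sync(main, timeout=5)
            except TimeoutError:
                pass  # ``main`` did not finish within 5 seconds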
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = Future()
future_set_exc_info(future_cell[0], sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = Future()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback():
# If we can cancel the future, do so and wait on it. If not,
                # just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
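        A sketch of both deadline forms (``callback`` is assumed to be a
        no-argument function defined elsewhere)::
            import datetime
            io_loop = IOLoop.current()
            io_loop.add_timeout(io_loop.time() + 3, callback)
            io_loop.add_timeout(datetime.timedelta(seconds=3), callback)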
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
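        Sketch (``callback`` as in `add_timeout` above)::
            handle = IOLoop.current().call_later(1.5, callback)
            IOLoop.current().remove_timeout(handle)  # cancel if still pending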
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
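        A cross-thread sketch (``io_loop`` is assumed to be a running
        loop captured on its own thread; ``do_blocking_work`` and
        ``update_ui`` are hypothetical)::
            import threading
            def worker():
                result = do_blocking_work()
                io_loop.add_callback(update_ui, result)
            threading.Thread(target=worker).start()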
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
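        Sketch (``fut`` is assumed to be a `.Future` produced elsewhere)::
            def on_done(f):
                print(f.result())
            IOLoop.current().add_future(fut, on_done)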
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future_add_done_callback(
future, lambda future: self.add_callback(callback, future))
def run_in_executor(self, executor, func, *args):
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
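        A sketch of the keyword-argument case (``fetch_rows`` is a
        hypothetical blocking function)::
            import functools
            future = IOLoop.current().run_in_executor(
                None, functools.partial(fetch_rows, limit=10))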
.. versionadded:: 5.0
"""
if ThreadPoolExecutor is None:
raise RuntimeError(
"concurrent.futures is required to use IOLoop.run_in_executor")
if executor is None:
if not hasattr(self, '_executor'):
from tornado.process import cpu_count
self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future()
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
def set_default_executor(self, executor):
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
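        Sketch (``sock`` is assumed to be a socket object)::
            fd, obj = self.split_fd(sock)            # (sock.fileno(), sock)
            fd, obj = self.split_fd(sock.fileno())   # (fileno, fileno)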
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = collections.deque()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in list(self._handlers.values()):
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
if hasattr(self, '_executor'):
self._executor.shutdown()
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = IOLoop.current(instance=False)
if old_current is not self:
self.make_current()
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
ncallbacks = len(self._callbacks)
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for i in range(ncallbacks):
self._run_callback(self._callbacks.popleft())
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
if old_current is None:
IOLoop.clear_current()
elif old_current is not self:
old_current.make_current()
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if self._closing:
return
# Blindly insert into self._callbacks. This is safe even
# from signal handlers because deque.append is atomic.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if thread.get_ident() != self._thread_ident:
# This will write one byte but Waker.consume() reads many
# at once, so it's ok to write even when not strictly
# necessary.
self._waker.wake()
else:
# If we're on the IOLoop's thread, we don't need to wake anyone.
pass
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
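    A sketch (``poll_status`` is a hypothetical no-argument function;
    note the interval is in milliseconds)::
        pc = PeriodicCallback(poll_status, 500)  # runs every 0.5 seconds
        pc.start()
        IOLoop.current().start()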
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
        # Looking up the IOLoop here makes it possible to instantiate the
        # PeriodicCallback in another thread and then start it on the
        # IOLoop's thread using IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
def _update_next(self, current_time):
callback_time_sec = self.callback_time / 1000.0
if self._next_timeout <= current_time:
# The period should be measured from the start of one call
# to the start of the next. If one call takes too long,
# skip cycles to get back to a multiple of the original
# schedule.
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
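            # Worked example: with callback_time=1000 (1.0s), a timeout
            # last scheduled at t=10.0 and current_time=12.3 gives
            # floor(2.3 / 1.0) + 1 == 3, so the next timeout is 13.0,
            # still on the original one-second grid (two cycles skipped).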
else:
# If the clock moved backwards, ensure we advance the next
# timeout instead of recomputing the same value again.
# This may result in long gaps between callbacks if the
# clock jumps backwards by a lot, but the far more common
# scenario is a small NTP adjustment that should just be
# ignored.
#
# Note that on some systems if time.time() runs slower
# than time.monotonic() (most common on windows), we
# effectively experience a small backwards time jump on
# every iteration because PeriodicCallback uses
# time.time() while asyncio schedules callbacks using
# time.monotonic().
# https://github.com/tornadoweb/tornado/issues/2333
self._next_timeout += callback_time_sec
| BugsInPy/BugsInPy/temp/projects/tornado/bug-5-fixed/tornado/tornado/ioloop.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-5-buggy/tornado/tornado/ioloop.py |
tornado-bug-12 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OAuth:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
user = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
.. versionchanged:: 4.0
All of the callback interfaces in this module are now guaranteed
to run their callback with an argument of ``None`` on error.
Previously some functions would do this while others would simply
terminate the request on their own. This change also ensures that
errors are more consistently reported through the ``Future`` interfaces.
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import TracebackFuture, return_future
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import u, unicode_type, ArgReplacer
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
try:
long # py2
except NameError:
long = int # py3
class AuthError(Exception):
pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
"""Similar to tornado.concurrent.return_future, but uses the auth
module's legacy callback interface.
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
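    A decoration sketch (``my_request`` is hypothetical; inside the
    body, ``callback`` is the future created by the decorator)::
        @_auth_return_future
        def my_request(self, url, callback):
            future = callback
            future.set_result({"url": url})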
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
def handle_exception(typ, value, tb):
if future.done():
return False
else:
future.set_exc_info((typ, value, tb))
return True
with ExceptionStackContext(handle_exception):
f(*args, **kwargs)
return future
return wrapper
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
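    A subclass sketch (the endpoint URL is hypothetical)::
        class MyOpenIdMixin(OpenIdMixin):
            _OPENID_ENDPOINT = "https://example.com/openid/endpoint"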
"""
@return_future
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
args["openid.mode"] = u("check_authentication")
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, functools.partial(
self._on_authentication_verified, callback),
method="POST", body=urllib_parse.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, future, response):
if response.error or b"is_valid:true" not in response.body:
future.set_exception(AuthError(
"Invalid OpenID response: %s" % (response.error or
response.body)))
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u("")
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u("")
return self.get_argument(ax_name, u(""))
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u(" ").join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` below for an example implementation.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
functools.partial(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
functools.partial(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
        will contain an ``access_token`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
functools.partial(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
functools.partial(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
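        An override sketch (the settings keys are hypothetical)::
            def _oauth_consumer_token(self):
                return dict(key=self.settings["my_consumer_key"],
                            secret=self.settings["my_consumer_secret"])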
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = escape.to_basestring(signature)
return base_args
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
implementations.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
@return_future
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None,
callback=None, scope=None, response_type="code"):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id,
"response_type": response_type
}
if extra_params:
args.update(extra_params)
if scope:
args['scope'] = ' '.join(scope)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
callback()
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
@_auth_return_future
def oauth2_request(self, url, callback, access_token=None,
post_args=None, **args):
"""Fetches the given URL auth an OAuth2 access token.
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
Example usage:
        .. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.oauth2_request(
"https://graph.facebook.com/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
.. versionadded:: 4.3
"""
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = functools.partial(self._on_oauth2_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_oauth2_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
.. versionadded:: 4.3
"""
return httpclient.AsyncHTTPClient()
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key
and Consumer Secret to the application
`~tornado.web.Application.settings` ``twitter_consumer_key`` and
``twitter_consumer_secret``. Use this mixin on the handler for the
URL you registered as your application's callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with Twitter and get access to their stream:
.. testcode::
class TwitterLoginHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
.. testoutput::
:hide:
The user object returned by `~OAuthMixin.get_authenticated_user`
includes the attributes ``username``, ``name``, ``access_token``,
and all of the custom Twitter user attributes described at
https://dev.twitter.com/docs/api/1.1/get/users/show
"""
_OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "https://api.twitter.com/1.1"
@return_future
def authenticate_redirect(self, callback_uri=None, callback=None):
"""Just like `~OAuthMixin.authorize_redirect`, but
auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
functools.partial(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
None, callback))
@_auth_return_future
def twitter_request(self, path, callback=None, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
The path should not include the format or API version number.
(we automatically use JSON format and API version 1).
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at http://dev.twitter.com/
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned through that
process includes an 'access_token' attribute that can be used
to make authenticated requests via this method. Example
usage:
.. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http_callback = functools.partial(self._on_twitter_request, callback)
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=http_callback)
else:
http.fetch(url, callback=http_callback)
def _on_twitter_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
user = yield self.twitter_request(
"/account/verify_credentials",
access_token=access_token)
if user:
user["username"] = user["screen_name"]
raise gen.Return(user)
class GoogleOAuth2Mixin(OAuth2Mixin):
"""Google authentication using OAuth2.
In order to use, register your application with Google and copy the
relevant parameters to your application settings.
* Go to the Google Dev Console at http://console.developers.google.com
* Select a project, or create a new one.
* In the sidebar on the left, select APIs & Auth.
* In the list of APIs, find the Google+ API service and set it to ON.
* In the sidebar on the left, select Credentials.
* In the OAuth section of the page, select Create New Client ID.
* Set the Redirect URI to point to your auth handler
* Copy the "Client secret" and "Client ID" to the application settings as
{"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
.. versionadded:: 3.2
"""
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth'
@_auth_return_future
def get_authenticated_user(self, redirect_uri, code, callback):
"""Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
        (among others; see https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
``https://www.googleapis.com/oauth2/v2/userinfo``)
Example usage:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
access = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
user = yield self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token with
# e.g. set_secure_cookie.
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
"""
http = self.get_auth_http_client()
body = urllib_parse.urlencode({
"redirect_uri": redirect_uri,
"code": code,
"client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
"client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
"grant_type": "authorization_code",
})
http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
functools.partial(self._on_access_token, callback),
method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
def _on_access_token(self, future, response):
"""Callback function for the exchange to the access token."""
if response.error:
future.set_exception(AuthError('Google auth error: %s' % str(response)))
return
args = escape.json_decode(response.body)
future.set_result(args)
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
_OAUTH_NO_CALLBACKS = False
_FACEBOOK_BASE_URL = "https://graph.facebook.com"
@_auth_return_future
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage:
.. testcode::
class FacebookGraphLoginHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
.. testoutput::
:hide:
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
functools.partial(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=functools.partial(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
future.set_result(fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage:
        .. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
This method is a wrapper around `OAuth2Mixin.oauth2_request`;
the only difference is that this method takes a relative path,
while ``oauth2_request`` takes a complete url.
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
return self.oauth2_request(url, callback, access_token,
post_args, **args)
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode_type):
val = val.encode("utf-8")
return urllib_parse.quote(val, safe="~")
def _oauth_parse_response(body):
# I can't find an officially-defined encoding for oauth responses and
# have never seen anyone use non-ascii. Leave the response in a byte
# string for python 2, and use utf8 on python 3.
body = escape.native_str(body)
p = urlparse.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments because these services
implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OAuth:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
user = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
.. versionchanged:: 4.0
All of the callback interfaces in this module are now guaranteed
to run their callback with an argument of ``None`` on error.
Previously some functions would do this while others would simply
terminate the request on their own. This change also ensures that
errors are more consistently reported through the ``Future`` interfaces.
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import TracebackFuture, return_future, chain_future
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import u, unicode_type, ArgReplacer
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
try:
long # py2
except NameError:
long = int # py3
class AuthError(Exception):
pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
"""Similar to tornado.concurrent.return_future, but uses the auth
module's legacy callback interface.
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
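    A minimal sketch of a decorated method (``do_auth`` and its behavior
    are illustrative, not part of this module)::

        @_auth_return_future
        def do_auth(self, value, callback):
            # ``callback`` is really the decorator's Future; resolve it.
            callback.set_result(value.upper())

    Callers may then either pass a real ``callback`` or use the returned
    `.Future` (e.g. ``result = yield handler.do_auth("x")``).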
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
def handle_exception(typ, value, tb):
if future.done():
return False
else:
future.set_exc_info((typ, value, tb))
return True
with ExceptionStackContext(handle_exception):
f(*args, **kwargs)
return future
return wrapper
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
"""
@return_future
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
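        For example, to request only a name and email (illustrative)::

            yield self.authenticate_redirect(ax_attrs=["name", "email"])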
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
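        A minimal login-handler sketch (``ExampleOpenIdLoginHandler`` and
        the endpoint value are illustrative)::

            class ExampleOpenIdLoginHandler(tornado.web.RequestHandler,
                                            tornado.auth.OpenIdMixin):
                _OPENID_ENDPOINT = "https://example.com/server"

                @tornado.gen.coroutine
                def get(self):
                    if self.get_argument("openid.mode", None):
                        user = yield self.get_authenticated_user()
                        self.set_secure_cookie(
                            "user", tornado.escape.json_encode(user))
                        self.redirect("/")
                    else:
                        yield self.authenticate_redirect()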
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
args["openid.mode"] = u("check_authentication")
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, functools.partial(
self._on_authentication_verified, callback),
method="POST", body=urllib_parse.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, future, response):
if response.error or b"is_valid:true" not in response.body:
future.set_exception(AuthError(
"Invalid OpenID response: %s" % (response.error or
response.body)))
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u("")
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u("")
return self.get_argument(ax_name, u(""))
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u(" ").join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
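        For example, a subclass could use a dedicated client with a larger
        connection pool (an illustrative sketch; ``max_clients`` is a
        ``simple_httpclient`` option)::

            def get_auth_http_client(self):
                # force_instance bypasses the per-IOLoop singleton so
                # custom arguments can be passed; cache to avoid leaks.
                if not hasattr(self, "_auth_client"):
                    self._auth_client = httpclient.AsyncHTTPClient(
                        force_instance=True, max_clients=50)
                return self._auth_client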
"""
return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` below for an example implementation.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
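    A skeletal subclass might look like this (all names, URLs, and
    settings keys are illustrative)::

        class ExampleOAuthMixin(tornado.auth.OAuthMixin):
            _OAUTH_VERSION = "1.0a"
            _OAUTH_REQUEST_TOKEN_URL = "https://example.com/oauth/request_token"
            _OAUTH_ACCESS_TOKEN_URL = "https://example.com/oauth/access_token"
            _OAUTH_AUTHORIZE_URL = "https://example.com/oauth/authorize"

            def _oauth_consumer_token(self):
                return dict(key=self.settings["example_consumer_key"],
                            secret=self.settings["example_consumer_secret"])

            @tornado.gen.coroutine
            def _oauth_get_user_future(self, access_token):
                # How to fetch profile data is service-specific; this
                # helper is hypothetical.
                user = yield self.example_profile_request(access_token)
                raise tornado.gen.Return(user)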
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
functools.partial(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
functools.partial(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
        will contain an ``access_token`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
functools.partial(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
functools.partial(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = escape.to_basestring(signature)
return base_args
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
implementations.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
@return_future
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None,
callback=None, scope=None, response_type="code"):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
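        A minimal sketch (the handler name, URLs, and settings key are
        illustrative)::

            class ExampleOAuth2LoginHandler(tornado.web.RequestHandler,
                                            tornado.auth.OAuth2Mixin):
                _OAUTH_AUTHORIZE_URL = "https://example.com/oauth2/authorize"

                @tornado.gen.coroutine
                def get(self):
                    yield self.authorize_redirect(
                        redirect_uri="https://your.site.com/auth/example",
                        client_id=self.settings["example_oauth"]["key"],
                        scope=["profile"],
                        response_type="code")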
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id,
"response_type": response_type
}
if extra_params:
args.update(extra_params)
if scope:
args['scope'] = ' '.join(scope)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
callback()
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
@_auth_return_future
def oauth2_request(self, url, callback, access_token=None,
post_args=None, **args):
"""Fetches the given URL auth an OAuth2 access token.
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
Example usage:
        .. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.oauth2_request(
"https://graph.facebook.com/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
.. versionadded:: 4.3
"""
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = functools.partial(self._on_oauth2_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_oauth2_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
.. versionadded:: 4.3
"""
return httpclient.AsyncHTTPClient()
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key
and Consumer Secret to the application
`~tornado.web.Application.settings` ``twitter_consumer_key`` and
``twitter_consumer_secret``. Use this mixin on the handler for the
URL you registered as your application's callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with Twitter and get access to their stream:
.. testcode::
class TwitterLoginHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
.. testoutput::
:hide:
The user object returned by `~OAuthMixin.get_authenticated_user`
includes the attributes ``username``, ``name``, ``access_token``,
and all of the custom Twitter user attributes described at
https://dev.twitter.com/docs/api/1.1/get/users/show
"""
_OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "https://api.twitter.com/1.1"
@return_future
def authenticate_redirect(self, callback_uri=None, callback=None):
"""Just like `~OAuthMixin.authorize_redirect`, but
auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
functools.partial(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
None, callback))
@_auth_return_future
def twitter_request(self, path, callback=None, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
        The path should not include the format or API version number
        (we automatically use JSON format and API version 1.1).
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at http://dev.twitter.com/
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned through that
process includes an 'access_token' attribute that can be used
to make authenticated requests via this method. Example
usage:
.. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http_callback = functools.partial(self._on_twitter_request, callback)
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=http_callback)
else:
http.fetch(url, callback=http_callback)
def _on_twitter_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
user = yield self.twitter_request(
"/account/verify_credentials",
access_token=access_token)
if user:
user["username"] = user["screen_name"]
raise gen.Return(user)
class GoogleOAuth2Mixin(OAuth2Mixin):
"""Google authentication using OAuth2.
In order to use, register your application with Google and copy the
relevant parameters to your application settings.
* Go to the Google Dev Console at http://console.developers.google.com
* Select a project, or create a new one.
* In the sidebar on the left, select APIs & Auth.
* In the list of APIs, find the Google+ API service and set it to ON.
* In the sidebar on the left, select Credentials.
* In the OAuth section of the page, select Create New Client ID.
    * Set the Redirect URI to point to your auth handler.
* Copy the "Client secret" and "Client ID" to the application settings as
{"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
.. versionadded:: 3.2
"""
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth'
@_auth_return_future
def get_authenticated_user(self, redirect_uri, code, callback):
"""Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
    ``https://www.googleapis.com/oauth2/v2/userinfo``).
Example usage:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
access = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
user = yield self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token with
# e.g. set_secure_cookie.
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
"""
http = self.get_auth_http_client()
body = urllib_parse.urlencode({
"redirect_uri": redirect_uri,
"code": code,
"client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
"client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
"grant_type": "authorization_code",
})
http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
functools.partial(self._on_access_token, callback),
method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
def _on_access_token(self, future, response):
"""Callback function for the exchange to the access token."""
if response.error:
future.set_exception(AuthError('Google auth error: %s' % str(response)))
return
args = escape.json_decode(response.body)
future.set_result(args)
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
_OAUTH_NO_CALLBACKS = False
_FACEBOOK_BASE_URL = "https://graph.facebook.com"
@_auth_return_future
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage:
.. testcode::
class FacebookGraphLoginHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
.. testoutput::
:hide:
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
functools.partial(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
args = urlparse.parse_qs(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=functools.partial(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
future.set_result(fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage:
        .. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
This method is a wrapper around `OAuth2Mixin.oauth2_request`;
the only difference is that this method takes a relative path,
while ``oauth2_request`` takes a complete url.
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
# Thanks to the _auth_return_future decorator, our "callback"
# argument is a Future, which we cannot pass as a callback to
# oauth2_request. Instead, have oauth2_request return a
# future and chain them together.
oauth_future = self.oauth2_request(url, access_token=access_token,
post_args=post_args, **args)
chain_future(oauth_future, callback)
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
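    For example, signing ``GET http://example.com/path`` with a single
    parameter ``a=1`` (illustrative) produces the base string
    ``GET&http%3A%2F%2Fexample.com%2Fpath&a%3D1``, which is signed with
    the key ``<consumer secret>&<token secret>``.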
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
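    Unlike `_oauth_signature`, the consumer and token secrets are
    percent-encoded here (with ``~`` left unescaped) before being joined
    into the ``secret&secret`` signing key, as 1.0a requires.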
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode_type):
val = val.encode("utf-8")
return urllib_parse.quote(val, safe="~")
def _oauth_parse_response(body):
# I can't find an officially-defined encoding for oauth responses and
# have never seen anyone use non-ascii. Leave the response in a byte
# string for python 2, and use utf8 on python 3.
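    # For example (illustrative input):
    #   b"oauth_token=abc&oauth_token_secret=def&user_id=42"
    #   -> {"key": "abc", "secret": "def", "user_id": "42"}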
body = escape.native_str(body)
p = urlparse.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
| BugsInPy/BugsInPy/temp/projects/tornado/bug-12-fixed/tornado/tornado/auth.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-12-buggy/tornado/tornado/auth.py |
tornado-bug-3 | """Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
* ``curl_httpclient`` was the default prior to Tornado 2.0.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``. Currently the minimum supported version of libcurl is
7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
recommended that your ``libcurl`` installation is built with
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
import datetime
import functools
from io import BytesIO
import ssl
import time
import weakref
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str
from tornado import gen, httputil
from tornado.ioloop import IOLoop
from tornado.util import Configurable
from typing import Type, Any, Union, Dict, Callable, Optional, cast
class HTTPClient(object):
"""A blocking HTTP client.
This interface is provided to make it easier to share code between
synchronous and asynchronous applications. Applications that are
running an `.IOLoop` must use `AsyncHTTPClient` instead.
Typical usage looks like this::
http_client = httpclient.HTTPClient()
try:
response = http_client.fetch("http://www.google.com/")
print(response.body)
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
print("Error: " + str(e))
except Exception as e:
# Other errors are possible, such as IOError.
print("Error: " + str(e))
http_client.close()
.. versionchanged:: 5.0
Due to limitations in `asyncio`, it is no longer possible to
use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
Use `AsyncHTTPClient` instead.
"""
def __init__(
self, async_client_class: Type["AsyncHTTPClient"] = None, **kwargs: Any
) -> None:
# Initialize self._closed at the beginning of the constructor
# so that an exception raised here doesn't lead to confusing
# failures in __del__.
self._closed = True
self._io_loop = IOLoop(make_current=False)
if async_client_class is None:
async_client_class = AsyncHTTPClient
# Create the client while our IOLoop is "current", without
# clobbering the thread's real current IOLoop (if any).
async def make_client() -> "AsyncHTTPClient":
await gen.sleep(0)
assert async_client_class is not None
return async_client_class(**kwargs)
self._async_client = self._io_loop.run_sync(make_client)
self._closed = False
def __del__(self) -> None:
self.close()
def close(self) -> None:
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True
def fetch(
self, request: Union["HTTPRequest", str], **kwargs: Any
) -> "HTTPResponse":
"""Executes a request, returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
If an error occurs during the fetch, we raise an `HTTPError` unless
the ``raise_error`` keyword argument is set to False.
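        Both calling styles, plus ``raise_error`` (illustrative URLs)::

            client = HTTPClient()
            resp = client.fetch("http://example.com/")
            resp = client.fetch(HTTPRequest("http://example.com/",
                                            method="HEAD"))
            resp = client.fetch("http://example.com/missing",
                                raise_error=False)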
"""
response = self._io_loop.run_sync(
functools.partial(self._async_client.fetch, request, **kwargs)
)
return response
class AsyncHTTPClient(Configurable):
"""An non-blocking HTTP client.
Example usage::
async def f():
http_client = AsyncHTTPClient()
try:
response = await http_client.fetch("http://www.google.com")
except Exception as e:
print("Error: %s" % e)
else:
print(response.body)
The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments should be passed to
the `AsyncHTTPClient` constructor. The implementation subclass as
well as arguments to its constructor can be set with the static
method `configure()`
All `AsyncHTTPClient` implementations support a ``defaults``
keyword argument, which can be used to set default values for
`HTTPRequest` attributes. For example::
AsyncHTTPClient.configure(
None, defaults=dict(user_agent="MyUserAgent"))
# or with force_instance:
client = AsyncHTTPClient(force_instance=True,
defaults=dict(user_agent="MyUserAgent"))
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
_instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient]
@classmethod
def configurable_base(cls) -> Type[Configurable]:
return AsyncHTTPClient
@classmethod
def configurable_default(cls) -> Type[Configurable]:
from tornado.simple_httpclient import SimpleAsyncHTTPClient
return SimpleAsyncHTTPClient
@classmethod
def _async_clients(cls) -> Dict[IOLoop, "AsyncHTTPClient"]:
attr_name = "_async_client_dict_" + cls.__name__
if not hasattr(cls, attr_name):
setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name)
def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient":
io_loop = IOLoop.current()
if force_instance:
instance_cache = None
else:
instance_cache = cls._async_clients()
if instance_cache is not None and io_loop in instance_cache:
return instance_cache[io_loop]
instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # type: ignore
# Make sure the instance knows which cache to remove itself from.
# It can't simply call _async_clients() because we may be in
# __new__(AsyncHTTPClient) but instance.__class__ may be
# SimpleAsyncHTTPClient.
instance._instance_cache = instance_cache
if instance_cache is not None:
instance_cache[instance.io_loop] = instance
return instance
def initialize(self, defaults: Dict[str, Any] = None) -> None:
self.io_loop = IOLoop.current()
self.defaults = dict(HTTPRequest._DEFAULTS)
if defaults is not None:
self.defaults.update(defaults)
self._closed = False
def close(self) -> None:
"""Destroys this HTTP client, freeing any file descriptors used.
This method is **not needed in normal use** due to the way
that `AsyncHTTPClient` objects are transparently reused.
``close()`` is generally only necessary when either the
`.IOLoop` is also being closed, or the ``force_instance=True``
argument was used when creating the `AsyncHTTPClient`.
No other methods may be called on the `AsyncHTTPClient` after
``close()``.
"""
if self._closed:
return
self._closed = True
if self._instance_cache is not None:
if self._instance_cache.get(self.io_loop) is not self:
raise RuntimeError("inconsistent AsyncHTTPClient cache")
del self._instance_cache[self.io_loop]
def fetch(
self,
request: Union[str, "HTTPRequest"],
raise_error: bool = True,
**kwargs: Any
) -> "Future[HTTPResponse]":
"""Executes a request, asynchronously returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
This method returns a `.Future` whose result is an
`HTTPResponse`. By default, the ``Future`` will raise an
`HTTPError` if the request returned a non-200 response code
(other errors may also be raised if the server could not be
contacted). Instead, if ``raise_error`` is set to False, the
response will always be returned regardless of the response
code.
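        For example, inside a coroutine (illustrative)::

            response = await AsyncHTTPClient().fetch(
                "http://example.com/", raise_error=False)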
        In the deprecated callback interface (removed in 6.0), `HTTPError`
        was not raised automatically; callers had to check the response's
        ``error`` attribute or call its `~HTTPResponse.rethrow` method.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
The ``raise_error=False`` argument only affects the
`HTTPError` raised when a non-200 response code is used,
instead of suppressing all errors.
"""
if self._closed:
raise RuntimeError("fetch() called on closed AsyncHTTPClient")
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
else:
if kwargs:
raise ValueError(
"kwargs can't be used if request is an HTTPRequest object"
)
# We may modify this (to add Host, Accept-Encoding, etc),
# so make sure we don't modify the caller's object. This is also
# where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers)
request_proxy = _RequestProxy(request, self.defaults)
future = Future() # type: Future[HTTPResponse]
def handle_response(response: "HTTPResponse") -> None:
if response.error:
if raise_error or not response._error_is_response_code:
future.set_exception(response.error)
return
future_set_result_unless_cancelled(future, response)
self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)
return future
def fetch_impl(
self, request: "HTTPRequest", callback: Callable[["HTTPResponse"], None]
) -> None:
raise NotImplementedError()
@classmethod
def configure(
cls, impl: Union[None, str, Type[Configurable]], **kwargs: Any
) -> None:
"""Configures the `AsyncHTTPClient` subclass to use.
``AsyncHTTPClient()`` actually creates an instance of a subclass.
This method may be called with either a class object or the
fully-qualified name of such a class (or ``None`` to use the default,
``SimpleAsyncHTTPClient``)
If additional keyword arguments are given, they will be passed
to the constructor of each subclass instance created. The
keyword argument ``max_clients`` determines the maximum number
of simultaneous `~AsyncHTTPClient.fetch()` operations that can
execute in parallel on each `.IOLoop`. Additional arguments
may be supported depending on the implementation class in use.
Example::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
class HTTPRequest(object):
"""HTTP client request object."""
_headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders]
# Default values for HTTPRequest parameters.
# Merged with the values on the request object by AsyncHTTPClient
# implementations.
_DEFAULTS = dict(
connect_timeout=20.0,
request_timeout=20.0,
follow_redirects=True,
max_redirects=5,
decompress_response=True,
proxy_password="",
allow_nonstandard_methods=False,
validate_cert=True,
)
def __init__(
self,
url: str,
method: str = "GET",
headers: Union[Dict[str, str], httputil.HTTPHeaders] = None,
body: Union[bytes, str] = None,
auth_username: str = None,
auth_password: str = None,
auth_mode: str = None,
connect_timeout: float = None,
request_timeout: float = None,
if_modified_since: Union[float, datetime.datetime] = None,
follow_redirects: bool = None,
max_redirects: int = None,
user_agent: str = None,
use_gzip: bool = None,
network_interface: str = None,
streaming_callback: Callable[[bytes], None] = None,
header_callback: Callable[[str], None] = None,
prepare_curl_callback: Callable[[Any], None] = None,
proxy_host: str = None,
proxy_port: int = None,
proxy_username: str = None,
proxy_password: str = None,
proxy_auth_mode: str = None,
allow_nonstandard_methods: bool = None,
validate_cert: bool = None,
ca_certs: str = None,
allow_ipv6: bool = None,
client_key: str = None,
client_cert: str = None,
body_producer: Callable[[Callable[[bytes], None]], "Future[None]"] = None,
expect_100_continue: bool = False,
decompress_response: bool = None,
ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None,
) -> None:
r"""All parameters except ``url`` are optional.
:arg str url: URL to fetch
:arg str method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg body: HTTP request body as a string (byte or unicode; if unicode
the utf-8 encoding will be used)
:arg body_producer: Callable used for lazy/asynchronous request bodies.
It is called with one argument, a ``write`` function, and should
return a `.Future`. It should call the write function with new
data as it becomes available. The write function returns a
`.Future` which can be used for flow control.
Only one of ``body`` and ``body_producer`` may
be specified. ``body_producer`` is not supported on
``curl_httpclient``. When using ``body_producer`` it is recommended
to pass a ``Content-Length`` in the headers as otherwise chunked
encoding will be used, and many servers do not support chunked
encoding on requests. New in Tornado 4.0
:arg str auth_username: Username for HTTP authentication
:arg str auth_password: Password for HTTP authentication
:arg str auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic"
:arg float connect_timeout: Timeout for initial connection in seconds,
default 20 seconds
:arg float request_timeout: Timeout for entire request in seconds,
default 20 seconds
:arg if_modified_since: Timestamp for ``If-Modified-Since`` header
:type if_modified_since: `datetime` or `float`
:arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response? Default True.
:arg int max_redirects: Limit for ``follow_redirects``, default 5.
:arg str user_agent: String to send as ``User-Agent`` header
:arg bool decompress_response: Request a compressed response from
the server and decompress it after downloading. Default is True.
New in Tornado 4.0.
:arg bool use_gzip: Deprecated alias for ``decompress_response``
since Tornado 4.0.
:arg str network_interface: Network interface or source IP to use for request.
See ``curl_httpclient`` note below.
:arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response.
:arg collections.abc.Callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline
characters). ``HTTPResponse.headers`` will be empty in the final
response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to
header data while the request is in progress.
:arg collections.abc.Callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls.
:arg str proxy_host: HTTP proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies are
currently only supported with ``curl_httpclient``.
:arg int proxy_port: HTTP proxy port
:arg str proxy_username: HTTP proxy username
:arg str proxy_password: HTTP proxy password
:arg str proxy_auth_mode: HTTP proxy authentication mode;
default is "basic". Supports "basic" and "digest".
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument? Default is False.
:arg bool validate_cert: For HTTPS requests, validate the server's
certificate? Default is True.
:arg str ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with
``curl_httpclient``.
:arg str client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``.
:arg str client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``.
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
``simple_httpclient`` (unsupported by ``curl_httpclient``).
Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
and ``client_cert``.
:arg bool allow_ipv6: Use IPv6 when available? Default is true.
:arg bool expect_100_continue: If true, send the
``Expect: 100-continue`` header and wait for a continue response
before sending the request body. Only supported with
simple_httpclient.
.. note::
When using ``curl_httpclient`` certain options may be
inherited by subsequent fetches because ``pycurl`` does
not allow them to be cleanly reset. This applies to the
``ca_certs``, ``client_key``, ``client_cert``, and
``network_interface`` arguments. If you use these
options, you should pass them on every request (you don't
have to always use the same values, but it's not possible
to mix requests that specify these options with ones that
use the defaults).
.. versionadded:: 3.1
The ``auth_mode`` argument.
.. versionadded:: 4.0
The ``body_producer`` and ``expect_100_continue`` arguments.
.. versionadded:: 4.2
The ``ssl_options`` argument.
.. versionadded:: 4.5
The ``proxy_auth_mode`` argument.
"""
# Note that some of these attributes go through property setters
# defined below.
self.headers = headers
if if_modified_since:
self.headers["If-Modified-Since"] = httputil.format_timestamp(
if_modified_since
)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.proxy_auth_mode = proxy_auth_mode
self.url = url
self.method = method
self.body = body
self.body_producer = body_producer
self.auth_username = auth_username
self.auth_password = auth_password
self.auth_mode = auth_mode
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self.user_agent = user_agent
if decompress_response is not None:
self.decompress_response = decompress_response # type: Optional[bool]
else:
self.decompress_response = use_gzip
self.network_interface = network_interface
self.streaming_callback = streaming_callback
self.header_callback = header_callback
self.prepare_curl_callback = prepare_curl_callback
self.allow_nonstandard_methods = allow_nonstandard_methods
self.validate_cert = validate_cert
self.ca_certs = ca_certs
self.allow_ipv6 = allow_ipv6
self.client_key = client_key
self.client_cert = client_cert
self.ssl_options = ssl_options
self.expect_100_continue = expect_100_continue
self.start_time = time.time()
@property
def headers(self) -> httputil.HTTPHeaders:
# TODO: headers may actually be a plain dict until fairly late in
# the process (AsyncHTTPClient.fetch), but practically speaking,
# whenever the property is used they're already HTTPHeaders.
return self._headers # type: ignore
@headers.setter
def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None:
if value is None:
self._headers = httputil.HTTPHeaders()
else:
self._headers = value # type: ignore
@property
def body(self) -> bytes:
return self._body
@body.setter
def body(self, value: Union[bytes, str]) -> None:
self._body = utf8(value)
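# Editor-added illustrative sketch (not part of Tornado): constructing an
# HTTPRequest with a few of the parameters documented above. The URL,
# header, and body values are hypothetical placeholders.
def _example_build_request():
    return HTTPRequest(
        url="http://example.com/api",
        method="POST",
        headers={"Content-Type": "application/json"},
        body='{"key": "value"}',  # str bodies are utf-8-encoded by the setter
        connect_timeout=5.0,
        request_timeout=10.0,
    )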
class HTTPResponse(object):
"""HTTP Response object.
Attributes:
* request: HTTPRequest object
* code: numeric HTTP status code, e.g. 200 or 404
* reason: human-readable reason phrase describing the status code
* headers: `tornado.httputil.HTTPHeaders` object
* effective_url: final location of the resource after following any
redirects
* buffer: ``BytesIO`` object for response body
* body: response body as bytes (created on demand from ``self.buffer``)
* error: Exception object, if any
* request_time: seconds from request start to finish. Includes all network
operations from DNS resolution to receiving the last byte of data.
Does not include time spent in the queue (due to the ``max_clients`` option).
If redirects were followed, only includes the final request.
* start_time: Time at which the HTTP operation started, based on `time.time`
(not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request
timed out while in the queue.
* time_info: dictionary of diagnostic timing information from the request.
Available data are subject to change, but currently uses timings
available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
plus ``queue``, which is the delay (if any) introduced by waiting for
a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
.. versionadded:: 5.1
Added the ``start_time`` attribute.
.. versionchanged:: 5.1
The ``request_time`` attribute previously included time spent in the queue
for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
is excluded in both implementations. ``request_time`` is now more accurate for
``curl_httpclient`` because it uses a monotonic clock when available.
"""
# I'm not sure why these don't get type-inferred from the references in __init__.
error = None # type: Optional[BaseException]
_error_is_response_code = False
request = None # type: HTTPRequest
def __init__(
self,
request: HTTPRequest,
code: int,
headers: httputil.HTTPHeaders = None,
buffer: BytesIO = None,
effective_url: str = None,
error: BaseException = None,
request_time: float = None,
time_info: Dict[str, float] = None,
reason: str = None,
start_time: float = None,
) -> None:
if isinstance(request, _RequestProxy):
self.request = request.request
else:
self.request = request
self.code = code
self.reason = reason or httputil.responses.get(code, "Unknown")
if headers is not None:
self.headers = headers
else:
self.headers = httputil.HTTPHeaders()
self.buffer = buffer
self._body = None # type: Optional[bytes]
if effective_url is None:
self.effective_url = request.url
else:
self.effective_url = effective_url
self._error_is_response_code = False
if error is None:
if self.code < 200 or self.code >= 300:
self._error_is_response_code = True
self.error = HTTPError(self.code, message=self.reason, response=self)
else:
self.error = None
else:
self.error = error
self.start_time = start_time
self.request_time = request_time
self.time_info = time_info or {}
@property
def body(self) -> bytes:
if self.buffer is None:
raise ValueError("body not set")
elif self._body is None:
self._body = self.buffer.getvalue()
return self._body
def rethrow(self) -> None:
"""If there was an error on the request, raise an `HTTPError`."""
if self.error:
raise self.error
def __repr__(self) -> str:
args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
return "%s(%s)" % (self.__class__.__name__, args)
class HTTPClientError(Exception):
"""Exception thrown for an unsuccessful HTTP request.
Attributes:
* ``code`` - integer HTTP error code, e.g. 404. Error code 599 is
used when no HTTP response was received, e.g. for a timeout.
* ``response`` - `HTTPResponse` object, if any.
Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
and you can look at ``error.response.headers['Location']`` to see the
destination of the redirect.
.. versionchanged:: 5.1
Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with
`tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains
as an alias.
"""
def __init__(
self, code: int, message: str = None, response: HTTPResponse = None
) -> None:
self.code = code
self.message = message or httputil.responses.get(code, "Unknown")
self.response = response
super(HTTPClientError, self).__init__(code, message, response)
def __str__(self) -> str:
return "HTTP %d: %s" % (self.code, self.message)
# There is a cyclic reference between self and self.response,
# which breaks the default __repr__ implementation.
# (especially on pypy, which doesn't have the same recursion
# detection as cpython).
__repr__ = __str__
HTTPError = HTTPClientError
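# Editor-added illustrative sketch (not part of Tornado): catching the
# HTTPError alias defined just above. Per the docstring, code 599 means no
# HTTP response was received, in which case e.response is None.
def _example_handle_error(client, url):
    try:
        return client.fetch(url)
    except HTTPError as e:
        if e.response is not None:
            print("HTTP %d: %d body bytes" % (e.code, len(e.response.body)))
        else:
            print("no response received: %s" % e)
        return None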
class _RequestProxy(object):
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(
self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]
) -> None:
self.request = request
self.defaults = defaults
def __getattr__(self, name: str) -> Any:
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
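# Editor-added illustrative sketch (not part of Tornado): demonstrates the
# attribute fallback implemented by _RequestProxy.__getattr__ above. The
# request leaves connect_timeout unset (None), so the proxy falls back to
# the value from HTTPRequest._DEFAULTS.
def _example_request_proxy():
    req = HTTPRequest(url="http://example.com")
    proxy = _RequestProxy(req, dict(HTTPRequest._DEFAULTS))
    assert proxy.url == "http://example.com"  # taken from the request
    assert proxy.connect_timeout == 20.0      # taken from the defaults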
def main() -> None:
from tornado.options import define, options, parse_command_line
define("print_headers", type=bool, default=False)
define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True)
define("proxy_host", type=str)
define("proxy_port", type=int)
args = parse_command_line()
client = HTTPClient()
for arg in args:
try:
response = client.fetch(
arg,
follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert,
proxy_host=options.proxy_host,
proxy_port=options.proxy_port,
)
except HTTPError as e:
if e.response is not None:
response = e.response
else:
raise
if options.print_headers:
print(response.headers)
if options.print_body:
print(native_str(response.body))
client.close()
if __name__ == "__main__":
main()
"""Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
* ``curl_httpclient`` was the default prior to Tornado 2.0.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``. Currently the minimum supported version of libcurl is
7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
recommended that your ``libcurl`` installation is built with
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
import datetime
import functools
from io import BytesIO
import ssl
import time
import weakref
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str
from tornado import gen, httputil
from tornado.ioloop import IOLoop
from tornado.util import Configurable
from typing import Type, Any, Union, Dict, Callable, Optional, cast
class HTTPClient(object):
"""A blocking HTTP client.
This interface is provided to make it easier to share code between
synchronous and asynchronous applications. Applications that are
running an `.IOLoop` must use `AsyncHTTPClient` instead.
Typical usage looks like this::
http_client = httpclient.HTTPClient()
try:
response = http_client.fetch("http://www.google.com/")
print(response.body)
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
print("Error: " + str(e))
except Exception as e:
# Other errors are possible, such as IOError.
print("Error: " + str(e))
http_client.close()
.. versionchanged:: 5.0
Due to limitations in `asyncio`, it is no longer possible to
use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
Use `AsyncHTTPClient` instead.
"""
def __init__(
self, async_client_class: Type["AsyncHTTPClient"] = None, **kwargs: Any
) -> None:
# Initialize self._closed at the beginning of the constructor
# so that an exception raised here doesn't lead to confusing
# failures in __del__.
self._closed = True
self._io_loop = IOLoop(make_current=False)
if async_client_class is None:
async_client_class = AsyncHTTPClient
# Create the client while our IOLoop is "current", without
# clobbering the thread's real current IOLoop (if any).
async def make_client() -> "AsyncHTTPClient":
await gen.sleep(0)
assert async_client_class is not None
return async_client_class(**kwargs)
self._async_client = self._io_loop.run_sync(make_client)
self._closed = False
def __del__(self) -> None:
self.close()
def close(self) -> None:
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True
def fetch(
self, request: Union["HTTPRequest", str], **kwargs: Any
) -> "HTTPResponse":
"""Executes a request, returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
If an error occurs during the fetch, we raise an `HTTPError` unless
the ``raise_error`` keyword argument is set to False.
"""
response = self._io_loop.run_sync(
functools.partial(self._async_client.fetch, request, **kwargs)
)
return response
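# Editor-added illustrative sketch (not part of Tornado): the blocking usage
# pattern from the HTTPClient docstring above. Keyword arguments such as
# request_timeout are forwarded to the HTTPRequest constructor.
def _example_blocking_fetch():
    client = HTTPClient()
    try:
        response = client.fetch("http://example.com/", request_timeout=5.0)
        print(response.code, len(response.body))
    finally:
        client.close()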
class AsyncHTTPClient(Configurable):
"""An non-blocking HTTP client.
Example usage::
async def f():
http_client = AsyncHTTPClient()
try:
response = await http_client.fetch("http://www.google.com")
except Exception as e:
print("Error: %s" % e)
else:
print(response.body)
The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments should be passed to
the `AsyncHTTPClient` constructor. The implementation subclass as
well as arguments to its constructor can be set with the static
method `configure()`.
All `AsyncHTTPClient` implementations support a ``defaults``
keyword argument, which can be used to set default values for
`HTTPRequest` attributes. For example::
AsyncHTTPClient.configure(
None, defaults=dict(user_agent="MyUserAgent"))
# or with force_instance:
client = AsyncHTTPClient(force_instance=True,
defaults=dict(user_agent="MyUserAgent"))
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
_instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient]
@classmethod
def configurable_base(cls) -> Type[Configurable]:
return AsyncHTTPClient
@classmethod
def configurable_default(cls) -> Type[Configurable]:
from tornado.simple_httpclient import SimpleAsyncHTTPClient
return SimpleAsyncHTTPClient
@classmethod
def _async_clients(cls) -> Dict[IOLoop, "AsyncHTTPClient"]:
attr_name = "_async_client_dict_" + cls.__name__
if not hasattr(cls, attr_name):
setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name)
def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient":
io_loop = IOLoop.current()
if force_instance:
instance_cache = None
else:
instance_cache = cls._async_clients()
if instance_cache is not None and io_loop in instance_cache:
return instance_cache[io_loop]
instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # type: ignore
# Make sure the instance knows which cache to remove itself from.
# It can't simply call _async_clients() because we may be in
# __new__(AsyncHTTPClient) but instance.__class__ may be
# SimpleAsyncHTTPClient.
instance._instance_cache = instance_cache
if instance_cache is not None:
instance_cache[instance.io_loop] = instance
return instance
def initialize(self, defaults: Dict[str, Any] = None) -> None:
self.io_loop = IOLoop.current()
self.defaults = dict(HTTPRequest._DEFAULTS)
if defaults is not None:
self.defaults.update(defaults)
self._closed = False
def close(self) -> None:
"""Destroys this HTTP client, freeing any file descriptors used.
This method is **not needed in normal use** due to the way
that `AsyncHTTPClient` objects are transparently reused.
``close()`` is generally only necessary when either the
`.IOLoop` is also being closed, or the ``force_instance=True``
argument was used when creating the `AsyncHTTPClient`.
No other methods may be called on the `AsyncHTTPClient` after
``close()``.
"""
if self._closed:
return
self._closed = True
if self._instance_cache is not None:
cached_val = self._instance_cache.pop(self.io_loop, None)
# If there's an object other than self in the instance
# cache for our IOLoop, something has gotten mixed up. A
# value of None appears to be possible when this is called
# from a destructor (HTTPClient.__del__) as the weakref
# gets cleared before the destructor runs.
if cached_val is not None and cached_val is not self:
raise RuntimeError("inconsistent AsyncHTTPClient cache")
def fetch(
self,
request: Union[str, "HTTPRequest"],
raise_error: bool = True,
**kwargs: Any
) -> "Future[HTTPResponse]":
"""Executes a request, asynchronously returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
This method returns a `.Future` whose result is an
`HTTPResponse`. By default, the ``Future`` will raise an
`HTTPError` if the request returned a non-200 response code
(other errors may also be raised if the server could not be
contacted). Instead, if ``raise_error`` is set to False, the
response will always be returned regardless of the response
code.
(Prior to Tornado 6.0, a ``callback`` argument could be given; it was
invoked with the `HTTPResponse`, and in that interface `HTTPError` was
not raised automatically. Instead, the response's ``error`` attribute
had to be checked, or its `~HTTPResponse.rethrow` method called.)
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
The ``raise_error=False`` argument only affects the
`HTTPError` raised when a non-200 response code is used,
instead of suppressing all errors.
"""
if self._closed:
raise RuntimeError("fetch() called on closed AsyncHTTPClient")
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
else:
if kwargs:
raise ValueError(
"kwargs can't be used if request is an HTTPRequest object"
)
# We may modify this (to add Host, Accept-Encoding, etc),
# so make sure we don't modify the caller's object. This is also
# where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers)
request_proxy = _RequestProxy(request, self.defaults)
future = Future() # type: Future[HTTPResponse]
def handle_response(response: "HTTPResponse") -> None:
if response.error:
if raise_error or not response._error_is_response_code:
future.set_exception(response.error)
return
future_set_result_unless_cancelled(future, response)
self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)
return future
def fetch_impl(
self, request: "HTTPRequest", callback: Callable[["HTTPResponse"], None]
) -> None:
raise NotImplementedError()
@classmethod
def configure(
cls, impl: Union[None, str, Type[Configurable]], **kwargs: Any
) -> None:
"""Configures the `AsyncHTTPClient` subclass to use.
``AsyncHTTPClient()`` actually creates an instance of a subclass.
This method may be called with either a class object or the
fully-qualified name of such a class (or ``None`` to use the default,
``SimpleAsyncHTTPClient``).
If additional keyword arguments are given, they will be passed
to the constructor of each subclass instance created. The
keyword argument ``max_clients`` determines the maximum number
of simultaneous `~AsyncHTTPClient.fetch()` operations that can
execute in parallel on each `.IOLoop`. Additional arguments
may be supported depending on the implementation class in use.
Example::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
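# Editor-added illustrative sketch (not part of Tornado): with
# raise_error=False, fetch() returns non-2xx responses instead of raising,
# as described in the fetch() docstring above; network-level failures such
# as a refused connection still raise.
async def _example_fetch_no_raise():
    client = AsyncHTTPClient()
    response = await client.fetch(
        "http://example.com/missing", raise_error=False
    )
    print(response.code)  # e.g. 404, instead of an HTTPError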
class HTTPRequest(object):
"""HTTP client request object."""
_headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders]
# Default values for HTTPRequest parameters.
# Merged with the values on the request object by AsyncHTTPClient
# implementations.
_DEFAULTS = dict(
connect_timeout=20.0,
request_timeout=20.0,
follow_redirects=True,
max_redirects=5,
decompress_response=True,
proxy_password="",
allow_nonstandard_methods=False,
validate_cert=True,
)
def __init__(
self,
url: str,
method: str = "GET",
headers: Union[Dict[str, str], httputil.HTTPHeaders] = None,
body: Union[bytes, str] = None,
auth_username: str = None,
auth_password: str = None,
auth_mode: str = None,
connect_timeout: float = None,
request_timeout: float = None,
if_modified_since: Union[float, datetime.datetime] = None,
follow_redirects: bool = None,
max_redirects: int = None,
user_agent: str = None,
use_gzip: bool = None,
network_interface: str = None,
streaming_callback: Callable[[bytes], None] = None,
header_callback: Callable[[str], None] = None,
prepare_curl_callback: Callable[[Any], None] = None,
proxy_host: str = None,
proxy_port: int = None,
proxy_username: str = None,
proxy_password: str = None,
proxy_auth_mode: str = None,
allow_nonstandard_methods: bool = None,
validate_cert: bool = None,
ca_certs: str = None,
allow_ipv6: bool = None,
client_key: str = None,
client_cert: str = None,
body_producer: Callable[[Callable[[bytes], None]], "Future[None]"] = None,
expect_100_continue: bool = False,
decompress_response: bool = None,
ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None,
) -> None:
r"""All parameters except ``url`` are optional.
:arg str url: URL to fetch
:arg str method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg body: HTTP request body as a string (bytes or unicode; if unicode,
the utf-8 encoding will be used)
:arg body_producer: Callable used for lazy/asynchronous request bodies.
It is called with one argument, a ``write`` function, and should
return a `.Future`. It should call the write function with new
data as it becomes available. The write function returns a
`.Future` which can be used for flow control.
Only one of ``body`` and ``body_producer`` may
be specified. ``body_producer`` is not supported on
``curl_httpclient``. When using ``body_producer`` it is recommended
to pass a ``Content-Length`` in the headers as otherwise chunked
encoding will be used, and many servers do not support chunked
encoding on requests. New in Tornado 4.0.
:arg str auth_username: Username for HTTP authentication
:arg str auth_password: Password for HTTP authentication
:arg str auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic"
:arg float connect_timeout: Timeout for initial connection in seconds,
default 20 seconds
:arg float request_timeout: Timeout for entire request in seconds,
default 20 seconds
:arg if_modified_since: Timestamp for ``If-Modified-Since`` header
:type if_modified_since: `datetime` or `float`
:arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response? Default True.
:arg int max_redirects: Limit for ``follow_redirects``, default 5.
:arg str user_agent: String to send as ``User-Agent`` header
:arg bool decompress_response: Request a compressed response from
the server and decompress it after downloading. Default is True.
New in Tornado 4.0.
:arg bool use_gzip: Deprecated alias for ``decompress_response``
since Tornado 4.0.
:arg str network_interface: Network interface or source IP to use for request.
See ``curl_httpclient`` note below.
:arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response.
:arg collections.abc.Callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline
characters). ``HTTPResponse.headers`` will be empty in the final
response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to
header data while the request is in progress.
:arg collections.abc.Callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls.
:arg str proxy_host: HTTP proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies are
currently only supported with ``curl_httpclient``.
:arg int proxy_port: HTTP proxy port
:arg str proxy_username: HTTP proxy username
:arg str proxy_password: HTTP proxy password
:arg str proxy_auth_mode: HTTP proxy authentication mode;
default is "basic". Supports "basic" and "digest".
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument? Default is False.
:arg bool validate_cert: For HTTPS requests, validate the server's
certificate? Default is True.
:arg str ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with
``curl_httpclient``.
:arg str client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``.
:arg str client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``.
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
``simple_httpclient`` (unsupported by ``curl_httpclient``).
Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
and ``client_cert``.
:arg bool allow_ipv6: Use IPv6 when available? Default is true.
:arg bool expect_100_continue: If true, send the
``Expect: 100-continue`` header and wait for a continue response
before sending the request body. Only supported with
simple_httpclient.
.. note::
When using ``curl_httpclient`` certain options may be
inherited by subsequent fetches because ``pycurl`` does
not allow them to be cleanly reset. This applies to the
``ca_certs``, ``client_key``, ``client_cert``, and
``network_interface`` arguments. If you use these
options, you should pass them on every request (you don't
have to always use the same values, but it's not possible
to mix requests that specify these options with ones that
use the defaults).
.. versionadded:: 3.1
The ``auth_mode`` argument.
.. versionadded:: 4.0
The ``body_producer`` and ``expect_100_continue`` arguments.
.. versionadded:: 4.2
The ``ssl_options`` argument.
.. versionadded:: 4.5
The ``proxy_auth_mode`` argument.
"""
# Note that some of these attributes go through property setters
# defined below.
self.headers = headers
if if_modified_since:
self.headers["If-Modified-Since"] = httputil.format_timestamp(
if_modified_since
)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.proxy_auth_mode = proxy_auth_mode
self.url = url
self.method = method
self.body = body
self.body_producer = body_producer
self.auth_username = auth_username
self.auth_password = auth_password
self.auth_mode = auth_mode
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self.user_agent = user_agent
if decompress_response is not None:
self.decompress_response = decompress_response # type: Optional[bool]
else:
self.decompress_response = use_gzip
self.network_interface = network_interface
self.streaming_callback = streaming_callback
self.header_callback = header_callback
self.prepare_curl_callback = prepare_curl_callback
self.allow_nonstandard_methods = allow_nonstandard_methods
self.validate_cert = validate_cert
self.ca_certs = ca_certs
self.allow_ipv6 = allow_ipv6
self.client_key = client_key
self.client_cert = client_cert
self.ssl_options = ssl_options
self.expect_100_continue = expect_100_continue
self.start_time = time.time()
@property
def headers(self) -> httputil.HTTPHeaders:
# TODO: headers may actually be a plain dict until fairly late in
# the process (AsyncHTTPClient.fetch), but practically speaking,
# whenever the property is used they're already HTTPHeaders.
return self._headers # type: ignore
@headers.setter
def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None:
if value is None:
self._headers = httputil.HTTPHeaders()
else:
self._headers = value # type: ignore
@property
def body(self) -> bytes:
return self._body
@body.setter
def body(self, value: Union[bytes, str]) -> None:
self._body = utf8(value)
class HTTPResponse(object):
"""HTTP Response object.
Attributes:
* request: HTTPRequest object
* code: numeric HTTP status code, e.g. 200 or 404
* reason: human-readable reason phrase describing the status code
* headers: `tornado.httputil.HTTPHeaders` object
* effective_url: final location of the resource after following any
redirects
* buffer: ``BytesIO`` object for response body
* body: response body as bytes (created on demand from ``self.buffer``)
* error: Exception object, if any
* request_time: seconds from request start to finish. Includes all network
operations from DNS resolution to receiving the last byte of data.
Does not include time spent in the queue (due to the ``max_clients`` option).
If redirects were followed, only includes the final request.
* start_time: Time at which the HTTP operation started, based on `time.time`
(not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request
timed out while in the queue.
* time_info: dictionary of diagnostic timing information from the request.
Available data are subject to change, but currently uses timings
available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
plus ``queue``, which is the delay (if any) introduced by waiting for
a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
.. versionadded:: 5.1
Added the ``start_time`` attribute.
.. versionchanged:: 5.1
The ``request_time`` attribute previously included time spent in the queue
for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
is excluded in both implementations. ``request_time`` is now more accurate for
``curl_httpclient`` because it uses a monotonic clock when available.
"""
# I'm not sure why these don't get type-inferred from the references in __init__.
error = None # type: Optional[BaseException]
_error_is_response_code = False
request = None # type: HTTPRequest
def __init__(
self,
request: HTTPRequest,
code: int,
headers: httputil.HTTPHeaders = None,
buffer: BytesIO = None,
effective_url: str = None,
error: BaseException = None,
request_time: float = None,
time_info: Dict[str, float] = None,
reason: str = None,
start_time: float = None,
) -> None:
if isinstance(request, _RequestProxy):
self.request = request.request
else:
self.request = request
self.code = code
self.reason = reason or httputil.responses.get(code, "Unknown")
if headers is not None:
self.headers = headers
else:
self.headers = httputil.HTTPHeaders()
self.buffer = buffer
self._body = None # type: Optional[bytes]
if effective_url is None:
self.effective_url = request.url
else:
self.effective_url = effective_url
self._error_is_response_code = False
if error is None:
if self.code < 200 or self.code >= 300:
self._error_is_response_code = True
self.error = HTTPError(self.code, message=self.reason, response=self)
else:
self.error = None
else:
self.error = error
self.start_time = start_time
self.request_time = request_time
self.time_info = time_info or {}
@property
def body(self) -> bytes:
if self.buffer is None:
raise ValueError("body not set")
elif self._body is None:
self._body = self.buffer.getvalue()
return self._body
def rethrow(self) -> None:
"""If there was an error on the request, raise an `HTTPError`."""
if self.error:
raise self.error
def __repr__(self) -> str:
args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
return "%s(%s)" % (self.__class__.__name__, args)
class HTTPClientError(Exception):
"""Exception thrown for an unsuccessful HTTP request.
Attributes:
* ``code`` - integer HTTP error code, e.g. 404. Error code 599 is
used when no HTTP response was received, e.g. for a timeout.
* ``response`` - `HTTPResponse` object, if any.
Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
and you can look at ``error.response.headers['Location']`` to see the
destination of the redirect.
.. versionchanged:: 5.1
Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with
`tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains
as an alias.
"""
def __init__(
self, code: int, message: str = None, response: HTTPResponse = None
) -> None:
self.code = code
self.message = message or httputil.responses.get(code, "Unknown")
self.response = response
super(HTTPClientError, self).__init__(code, message, response)
def __str__(self) -> str:
return "HTTP %d: %s" % (self.code, self.message)
# There is a cyclic reference between self and self.response,
# which breaks the default __repr__ implementation.
# (especially on pypy, which doesn't have the same recursion
# detection as cpython).
__repr__ = __str__
HTTPError = HTTPClientError
class _RequestProxy(object):
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(
self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]
) -> None:
self.request = request
self.defaults = defaults
def __getattr__(self, name: str) -> Any:
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
def main() -> None:
from tornado.options import define, options, parse_command_line
define("print_headers", type=bool, default=False)
define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True)
define("proxy_host", type=str)
define("proxy_port", type=int)
args = parse_command_line()
client = HTTPClient()
for arg in args:
try:
response = client.fetch(
arg,
follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert,
proxy_host=options.proxy_host,
proxy_port=options.proxy_port,
)
except HTTPError as e:
if e.response is not None:
response = e.response
else:
raise
if options.print_headers:
print(response.headers)
if options.print_body:
print(native_str(response.body))
client.close()
if __name__ == "__main__":
main()
| BugsInPy/BugsInPy/temp/projects/tornado/bug-3-fixed/tornado/tornado/httpclient.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-3-buggy/tornado/tornado/httpclient.py |
tornado-bug-15 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import platform
import sys
import warnings
try:
# Use setuptools if available, for install_requires (among other things).
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
from distutils.core import Extension
# The following code is copied from
# https://github.com/mongodb/mongo-python-driver/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
from distutils.command.build_ext import build_ext
class custom_build_ext(build_ext):
"""Allow C extension building to fail.
The C extension speeds up websocket masking, but is not essential.
"""
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for Tornado to run,
although they do result in significant speed improvements for
websockets.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("Extension modules",
"There was an issue with "
"your platform configuration"
" - see above."))
def build_extension(self, ext):
name = ext.name
try:
build_ext.build_extension(self, ext)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("The %s extension "
"module" % (name,),
"The output above "
"this warning shows how "
"the compilation "
"failed."))
kwargs = {}
version = "4.2"
with open('README.rst') as f:
kwargs['long_description'] = f.read()
if (platform.python_implementation() == 'CPython' and
os.environ.get('TORNADO_EXTENSION') != '0'):
# This extension builds and works on pypy as well, although pypy's jit
# produces equivalent performance.
kwargs['ext_modules'] = [
Extension('tornado.speedups',
sources=['tornado/speedups.c']),
]
if os.environ.get('TORNADO_EXTENSION') != '1':
# Unless the user has specified that the extension is mandatory,
# fall back to the pure-python implementation on any build failure.
kwargs['cmdclass'] = {'build_ext': custom_build_ext}
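# Editor-added note (not part of the original setup.py): TORNADO_EXTENSION,
# checked above, is the switch controlling the C extension. Illustrative
# shell usage:
#
#     TORNADO_EXTENSION=0 python setup.py install   # skip the extension
#     TORNADO_EXTENSION=1 python setup.py install   # fail if it can't build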
if setuptools is not None:
# If setuptools is not available, you're on your own for dependencies.
install_requires = []
if sys.version_info < (3, 2):
install_requires.append('backports.ssl_match_hostname')
if sys.version_info < (3, 4):
# Certifi is also optional on 2.7.9+, although making our dependencies
# conditional on micro version numbers seems like a bad idea
# until we have more declarative metadata.
install_requires.append('certifi')
kwargs['install_requires'] = install_requires
setup(
name="tornado",
version=version,
packages=["tornado", "tornado.test", "tornado.platform"],
package_data={
# data files need to be listed both here (which determines what gets
# installed) and in MANIFEST.in (which determines what gets included
# in the sdist tarball)
"tornado.test": [
"README",
"csv_translations/fr_FR.csv",
"gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo",
"gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po",
"options_test.cfg",
"static/robots.txt",
"static/dir/index.html",
"templates/utf8.html",
"test.crt",
"test.key",
],
},
author="Facebook",
author_email="[email protected]",
url="http://www.tornadoweb.org/",
license="http://www.apache.org/licenses/LICENSE-2.0",
description="Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
**kwargs
)
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import platform
import sys
import warnings
try:
# Use setuptools if available, for install_requires (among other things).
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
from distutils.core import Extension
# The following code is copied from
# https://github.com/mongodb/mongo-python-driver/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
from distutils.command.build_ext import build_ext
class custom_build_ext(build_ext):
"""Allow C extension building to fail.
The C extension speeds up websocket masking, but is not essential.
"""
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for Tornado to run,
although they do result in significant speed improvements for
websockets.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("Extension modules",
"There was an issue with "
"your platform configuration"
" - see above."))
def build_extension(self, ext):
name = ext.name
try:
build_ext.build_extension(self, ext)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("The %s extension "
"module" % (name,),
"The output above "
"this warning shows how "
"the compilation "
"failed."))
kwargs = {}
version = "4.2"
with open('README.rst') as f:
kwargs['long_description'] = f.read()
if (platform.python_implementation() == 'CPython' and
os.environ.get('TORNADO_EXTENSION') != '0'):
# This extension builds and works on pypy as well, although pypy's jit
# produces equivalent performance.
kwargs['ext_modules'] = [
Extension('tornado.speedups',
sources=['tornado/speedups.c']),
]
if os.environ.get('TORNADO_EXTENSION') != '1':
# Unless the user has specified that the extension is mandatory,
# fall back to the pure-python implementation on any build failure.
kwargs['cmdclass'] = {'build_ext': custom_build_ext}
if setuptools is not None:
# If setuptools is not available, you're on your own for dependencies.
install_requires = []
if sys.version_info < (3, 2):
install_requires.append('backports.ssl_match_hostname')
if sys.version_info < (3, 4):
# Certifi is also optional on 2.7.9+, although making our dependencies
# conditional on micro version numbers seems like a bad idea
# until we have more declarative metadata.
install_requires.append('certifi')
kwargs['install_requires'] = install_requires
setup(
name="tornado",
version=version,
packages=["tornado", "tornado.test", "tornado.platform"],
package_data={
# data files need to be listed both here (which determines what gets
# installed) and in MANIFEST.in (which determines what gets included
# in the sdist tarball)
"tornado.test": [
"README",
"csv_translations/fr_FR.csv",
"gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo",
"gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po",
"options_test.cfg",
"static/robots.txt",
"static/dir/index.html",
"static_foo.txt",
"templates/utf8.html",
"test.crt",
"test.key",
],
},
author="Facebook",
author_email="[email protected]",
url="http://www.tornadoweb.org/",
license="http://www.apache.org/licenses/LICENSE-2.0",
description="Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
**kwargs
)
| BugsInPy/BugsInPy/temp/projects/tornado/bug-15-fixed/tornado/setup.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-15-buggy/tornado/setup.py |
tornado-bug-11 | #!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client and server implementations of HTTP/1.x.
.. versionadded:: 4.0
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
from tornado.concurrent import Future
from tornado.escape import native_str, utf8
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log, app_log
from tornado import stack_context
from tornado.util import GzipDecompressor, PY3
class _QuietException(Exception):
def __init__(self):
pass
class _ExceptionLoggingContext(object):
"""Used with the ``with`` statement when calling delegate methods to
log any exceptions with the given logger. Any exceptions caught are
converted to _QuietException
"""
def __init__(self, logger):
self.logger = logger
def __enter__(self):
pass
def __exit__(self, typ, value, tb):
if value is not None:
self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
raise _QuietException
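# Editor-added illustrative sketch (not part of Tornado): how the context
# manager above is used around delegate callbacks later in this module.
# `delegate` stands for a hypothetical .HTTPMessageDelegate instance.
def _example_logged_callback(delegate):
    with _ExceptionLoggingContext(app_log):
        delegate.finish()  # any exception is logged, then re-raised
                           # as _QuietException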
class HTTP1ConnectionParameters(object):
"""Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
"""
def __init__(self, no_keep_alive=False, chunk_size=None,
max_header_size=None, header_timeout=None, max_body_size=None,
body_timeout=None, decompress=False):
"""
:arg bool no_keep_alive: If true, always close the connection after
one request.
:arg int chunk_size: how much data to read into memory at once
:arg int max_header_size: maximum amount of data for HTTP headers
:arg float header_timeout: how long to wait for all headers (seconds)
:arg int max_body_size: maximum amount of data for body
:arg float body_timeout: how long to wait while reading body (seconds)
:arg bool decompress: if true, decode incoming
``Content-Encoding: gzip``
"""
self.no_keep_alive = no_keep_alive
self.chunk_size = chunk_size or 65536
self.max_header_size = max_header_size or 65536
self.header_timeout = header_timeout
self.max_body_size = max_body_size
self.body_timeout = body_timeout
self.decompress = decompress
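# Editor-added illustrative sketch (not part of Tornado): constructing the
# parameter object documented above with explicit limits. The numbers are
# arbitrary placeholders, not recommended values.
def _example_connection_params():
    return HTTP1ConnectionParameters(
        chunk_size=64 * 1024,
        max_header_size=16 * 1024,
        header_timeout=10.0,
        body_timeout=60.0,
        decompress=True,
    )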
class HTTP1Connection(httputil.HTTPConnection):
"""Implements the HTTP/1.x protocol.
This class can be used on its own for clients, or via `HTTP1ServerConnection`
for servers.
"""
def __init__(self, stream, is_client, params=None, context=None):
"""
:arg stream: an `.IOStream`
:arg bool is_client: client or server
:arg params: a `.HTTP1ConnectionParameters` instance or ``None``
:arg context: an opaque application-defined object that can be accessed
as ``connection.context``.
"""
self.is_client = is_client
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self.no_keep_alive = params.no_keep_alive
# The body limits can be altered by the delegate, so save them
# here instead of just referencing self.params later.
self._max_body_size = (self.params.max_body_size or
self.stream.max_buffer_size)
self._body_timeout = self.params.body_timeout
# _write_finished is set to True when finish() has been called,
# i.e. there will be no more data sent. Data may still be in the
# stream's write buffer.
self._write_finished = False
# True when we have read the entire incoming body.
self._read_finished = False
# _finish_future resolves when all data has been written and flushed
# to the IOStream.
self._finish_future = Future()
# If true, the connection should be closed after this request
# (after the response has been written in the server side,
# and after it has been read in the client)
self._disconnect_on_finish = False
self._clear_callbacks()
# Save the start lines after we read or write them; they
# affect later processing (e.g. 304 responses and HEAD methods
# have content-length but no bodies)
self._request_start_line = None
self._response_start_line = None
self._request_headers = None
# True if we are writing output with chunked encoding.
self._chunking_output = None
# While reading a body with a content-length, this is the
# amount left to read.
self._expected_content_remaining = None
# A Future for our outgoing writes, returned by IOStream.write.
self._pending_write = None
def read_response(self, delegate):
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to None after the full response has
been read.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate)
@gen.coroutine
def _read_message(self, delegate):
need_delegate_close = False
try:
header_future = self.stream.read_until_regex(
b"\r?\n\r?\n",
max_bytes=self.params.max_header_size)
if self.params.header_timeout is None:
header_data = yield header_future
else:
try:
header_data = yield gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout,
header_future,
io_loop=self.stream.io_loop,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
self.close()
raise gen.Return(False)
start_line, headers = self._parse_headers(header_data)
if self.is_client:
start_line = httputil.parse_response_start_line(start_line)
self._response_start_line = start_line
else:
start_line = httputil.parse_request_start_line(start_line)
self._request_start_line = start_line
self._request_headers = headers
self._disconnect_on_finish = not self._can_keep_alive(
start_line, headers)
need_delegate_close = True
with _ExceptionLoggingContext(app_log):
header_future = delegate.headers_received(start_line, headers)
if header_future is not None:
yield header_future
if self.stream is None:
# We've been detached.
need_delegate_close = False
raise gen.Return(False)
skip_body = False
if self.is_client:
if (self._request_start_line is not None and
self._request_start_line.method == 'HEAD'):
skip_body = True
code = start_line.code
if code == 304:
# 304 responses may include the content-length header
# but do not actually have a body.
# http://tools.ietf.org/html/rfc7230#section-3.3
skip_body = True
if code >= 100 and code < 200:
# 1xx responses should never indicate the presence of
# a body.
if ('Content-Length' in headers or
'Transfer-Encoding' in headers):
raise httputil.HTTPInputError(
"Response code %d cannot have body" % code)
# TODO: client delegates will get headers_received twice
# in the case of a 100-continue. Document or change?
yield self._read_message(delegate)
else:
if (headers.get("Expect") == "100-continue" and
not self._write_finished):
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
if not skip_body:
body_future = self._read_body(
start_line.code if self.is_client else 0, headers, delegate)
if body_future is not None:
if self._body_timeout is None:
yield body_future
else:
try:
yield gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout,
body_future, self.stream.io_loop,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
gen_log.info("Timeout reading body from %s",
self.context)
self.stream.close()
raise gen.Return(False)
self._read_finished = True
if not self._write_finished or self.is_client:
need_delegate_close = False
with _ExceptionLoggingContext(app_log):
delegate.finish()
# If we're waiting for the application to produce an asynchronous
# response, and we're not detached, register a close callback
# on the stream (we didn't need one while we were reading)
if (not self._finish_future.done() and
self.stream is not None and
not self.stream.closed()):
self.stream.set_close_callback(self._on_connection_close)
yield self._finish_future
if self.is_client and self._disconnect_on_finish:
self.close()
if self.stream is None:
raise gen.Return(False)
except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s",
self.context, e)
self.close()
raise gen.Return(False)
finally:
if need_delegate_close:
with _ExceptionLoggingContext(app_log):
delegate.on_connection_close()
self._clear_callbacks()
raise gen.Return(True)
def _clear_callbacks(self):
"""Clears the callback attributes.
This allows the request handler to be garbage collected more
quickly in CPython by breaking up reference cycles.
"""
self._write_callback = None
self._write_future = None
self._close_callback = None
if self.stream is not None:
self.stream.set_close_callback(None)
def set_close_callback(self, callback):
"""Sets a callback that will be run when the connection is closed.
.. deprecated:: 4.0
Use `.HTTPMessageDelegate.on_connection_close` instead.
"""
self._close_callback = stack_context.wrap(callback)
def _on_connection_close(self):
# Note that this callback is only registered on the IOStream
# when we have finished reading the request and are waiting for
# the application to produce its response.
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
if not self._finish_future.done():
self._finish_future.set_result(None)
self._clear_callbacks()
def close(self):
if self.stream is not None:
self.stream.close()
self._clear_callbacks()
if not self._finish_future.done():
self._finish_future.set_result(None)
def detach(self):
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all further
HTTP processing. May only be called during
`.HTTPMessageDelegate.headers_received`. Intended for implementing
protocols like websockets that tunnel over an HTTP handshake.
"""
self._clear_callbacks()
stream = self.stream
self.stream = None
if not self._finish_future.done():
self._finish_future.set_result(None)
return stream
def set_body_timeout(self, timeout):
"""Sets the body timeout for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._body_timeout = timeout
def set_max_body_size(self, max_body_size):
"""Sets the body size limit for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._max_body_size = max_body_size
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Implements `.HTTPConnection.write_headers`."""
lines = []
if self.is_client:
self._request_start_line = start_line
lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
# Client requests with a non-empty body must have either a
# Content-Length or a Transfer-Encoding.
self._chunking_output = (
start_line.method in ('POST', 'PUT', 'PATCH') and
'Content-Length' not in headers and
'Transfer-Encoding' not in headers)
else:
self._response_start_line = start_line
lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
self._chunking_output = (
# TODO: should this use
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == 'HTTP/1.1' and
                # 204 and 304 responses have no body (not even a zero-length
                # body), and so should not have either Content-Length or
                # Transfer-Encoding headers.
start_line.code not in (204, 304) and
# No need to chunk the output if a Content-Length is specified.
'Content-Length' not in headers and
# Applications are discouraged from touching Transfer-Encoding,
# but if they do, leave it alone.
'Transfer-Encoding' not in headers)
# If a 1.0 client asked for keep-alive, add the header.
if (self._request_start_line.version == 'HTTP/1.0' and
(self._request_headers.get('Connection', '').lower() ==
'keep-alive')):
headers['Connection'] = 'Keep-Alive'
if self._chunking_output:
headers['Transfer-Encoding'] = 'chunked'
if (not self.is_client and
(self._request_start_line.method == 'HEAD' or
start_line.code == 304)):
self._expected_content_remaining = 0
elif 'Content-Length' in headers:
self._expected_content_remaining = int(headers['Content-Length'])
else:
self._expected_content_remaining = None
# TODO: headers are supposed to be of type str, but we still have some
# cases that let bytes slip through. Remove these native_str calls when those
# are fixed.
header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all())
if PY3:
lines.extend(l.encode('latin1') for l in header_lines)
else:
lines.extend(header_lines)
for line in lines:
if b'\n' in line:
raise ValueError('Newline in header: ' + repr(line))
future = None
if self.stream.closed():
future = self._write_future = Future()
future.set_exception(iostream.StreamClosedError())
future.exception()
else:
if callback is not None:
self._write_callback = stack_context.wrap(callback)
else:
future = self._write_future = Future()
data = b"\r\n".join(lines) + b"\r\n\r\n"
if chunk:
data += self._format_chunk(chunk)
self._pending_write = self.stream.write(data)
self._pending_write.add_done_callback(self._on_write_complete)
return future
def _format_chunk(self, chunk):
if self._expected_content_remaining is not None:
self._expected_content_remaining -= len(chunk)
if self._expected_content_remaining < 0:
# Close the stream now to stop further framing errors.
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write more data than Content-Length")
if self._chunking_output and chunk:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
else:
return chunk
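# Illustrative sketch: the chunked transfer-coding framing produced by
# _format_chunk above, as a standalone function (RFC 7230, section 4.1).
# Note bytes %-formatting needs Python 3.5+; utf8("%x" % n) as used above
# is the Python 2/3-portable form.
def frame_chunk(chunk):
    # A chunk is its length in hex, CRLF, the payload, CRLF.  The
    # zero-length chunk b"0\r\n\r\n" marks end-of-stream, which is why
    # empty chunks must never be emitted mid-body.
    return b"%x" % len(chunk) + b"\r\n" + chunk + b"\r\n"

assert frame_chunk(b"hello") == b"5\r\nhello\r\n"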
def write(self, chunk, callback=None):
"""Implements `.HTTPConnection.write`.
        For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a
pre-encoded header block.
"""
future = None
if self.stream.closed():
future = self._write_future = Future()
self._write_future.set_exception(iostream.StreamClosedError())
self._write_future.exception()
else:
if callback is not None:
self._write_callback = stack_context.wrap(callback)
else:
future = self._write_future = Future()
self._pending_write = self.stream.write(self._format_chunk(chunk))
self._pending_write.add_done_callback(self._on_write_complete)
return future
def finish(self):
"""Implements `.HTTPConnection.finish`."""
if (self._expected_content_remaining is not None and
self._expected_content_remaining != 0 and
not self.stream.closed()):
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write %d bytes less than Content-Length" %
self._expected_content_remaining)
if self._chunking_output:
if not self.stream.closed():
self._pending_write = self.stream.write(b"0\r\n\r\n")
self._pending_write.add_done_callback(self._on_write_complete)
self._write_finished = True
# If the app finished the request while we're still reading,
# divert any remaining data away from the delegate and
# close the connection when we're done sending our response.
# Closing the connection is the only way to avoid reading the
# whole input body.
if not self._read_finished:
self._disconnect_on_finish = True
# No more data is coming, so instruct TCP to send any remaining
# data immediately instead of waiting for a full packet or ack.
self.stream.set_nodelay(True)
if self._pending_write is None:
self._finish_request(None)
else:
self._pending_write.add_done_callback(self._finish_request)
def _on_write_complete(self, future):
exc = future.exception()
if exc is not None and not isinstance(exc, iostream.StreamClosedError):
future.result()
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
self.stream.io_loop.add_callback(callback)
if self._write_future is not None:
future = self._write_future
self._write_future = None
future.set_result(None)
def _can_keep_alive(self, start_line, headers):
if self.params.no_keep_alive:
return False
connection_header = headers.get("Connection")
if connection_header is not None:
connection_header = connection_header.lower()
if start_line.version == "HTTP/1.1":
return connection_header != "close"
elif ("Content-Length" in headers or
headers.get("Transfer-Encoding", "").lower() == "chunked" or
getattr(start_line, 'method', None) in ("HEAD", "GET")):
            # start_line may be a request or response start line; only
# the former has a method attribute.
return connection_header == "keep-alive"
return False
def _finish_request(self, future):
self._clear_callbacks()
if not self.is_client and self._disconnect_on_finish:
self.close()
return
# Turn Nagle's algorithm back on, leaving the stream in its
# default state for the next request.
self.stream.set_nodelay(False)
if not self._finish_future.done():
self._finish_future.set_result(None)
def _parse_headers(self, data):
# The lstrip removes newlines that some implementations sometimes
# insert between messages of a reused connection. Per RFC 7230,
# we SHOULD ignore at least one empty line before the request.
# http://tools.ietf.org/html/rfc7230#section-3.5
data = native_str(data.decode('latin1')).lstrip("\r\n")
        # RFC 7230 section 3.5 allows for both CRLF and bare LF.
eol = data.find("\n")
start_line = data[:eol].rstrip("\r")
try:
headers = httputil.HTTPHeaders.parse(data[eol:])
except ValueError:
            # probably from split() if there was no ':' in the line
raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
data[eol:100])
return start_line, headers
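# Illustrative sketch: what _parse_headers produces for a raw header
# block (the request bytes here are made up for the example).
from tornado import httputil

raw = b"GET /index.html HTTP/1.1\r\nHost: example.com\r\nAccept: */*\r\n"
text = raw.decode("latin1").lstrip("\r\n")
eol = text.find("\n")
start_line = text[:eol].rstrip("\r")      # "GET /index.html HTTP/1.1"
headers = httputil.HTTPHeaders.parse(text[eol:])
assert headers["Host"] == "example.com"   # lookups are case-insensitive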
def _read_body(self, code, headers, delegate):
if "Content-Length" in headers:
if "Transfer-Encoding" in headers:
# Response cannot contain both Content-Length and
# Transfer-Encoding headers.
# http://tools.ietf.org/html/rfc7230#section-3.3.3
raise httputil.HTTPInputError(
"Response with both Transfer-Encoding and Content-Length")
if "," in headers["Content-Length"]:
# Proxies sometimes cause Content-Length headers to get
# duplicated. If all the values are identical then we can
# use them but if they differ it's an error.
pieces = re.split(r',\s*', headers["Content-Length"])
if any(i != pieces[0] for i in pieces):
raise httputil.HTTPInputError(
"Multiple unequal Content-Lengths: %r" %
headers["Content-Length"])
headers["Content-Length"] = pieces[0]
try:
content_length = int(headers["Content-Length"])
except ValueError:
# Handles non-integer Content-Length value.
raise httputil.HTTPInputError(
"Only integer Content-Length is allowed: %s" % headers["Content-Length"])
if content_length > self._max_body_size:
raise httputil.HTTPInputError("Content-Length too long")
else:
content_length = None
if code == 204:
# This response code is not allowed to have a non-empty body,
# and has an implicit length of zero instead of read-until-close.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
if ("Transfer-Encoding" in headers or
content_length not in (None, 0)):
raise httputil.HTTPInputError(
"Response with code %d should not have body" % code)
content_length = 0
if content_length is not None:
return self._read_fixed_body(content_length, delegate)
if headers.get("Transfer-Encoding") == "chunked":
return self._read_chunked_body(delegate)
if self.is_client:
return self._read_body_until_close(delegate)
return None
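# Illustrative sketch: the duplicated Content-Length normalization done
# above.  Proxies sometimes repeat the header; identical copies are
# accepted, while differing values are a hard error.
import re

value = "1024, 1024"                       # as sent by a buggy proxy
pieces = re.split(r',\s*', value)
assert not any(p != pieces[0] for p in pieces)
content_length = int(pieces[0])            # -> 1024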
@gen.coroutine
def _read_fixed_body(self, content_length, delegate):
while content_length > 0:
body = yield self.stream.read_bytes(
min(self.params.chunk_size, content_length), partial=True)
content_length -= len(body)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(body)
if ret is not None:
yield ret
@gen.coroutine
def _read_chunked_body(self, delegate):
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
total_size = 0
while True:
chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len.strip(), 16)
if chunk_len == 0:
return
total_size += chunk_len
if total_size > self._max_body_size:
raise httputil.HTTPInputError("chunked body too large")
bytes_to_read = chunk_len
while bytes_to_read:
chunk = yield self.stream.read_bytes(
min(bytes_to_read, self.params.chunk_size), partial=True)
bytes_to_read -= len(chunk)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(chunk)
if ret is not None:
yield ret
# chunk ends with \r\n
crlf = yield self.stream.read_bytes(2)
assert crlf == b"\r\n"
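# Illustrative sketch: the same chunked-body framing decoded from an
# in-memory buffer (no chunk extensions, mirroring the loop above).
import io

def decode_chunked(buf):
    f = io.BytesIO(buf)
    out = []
    while True:
        size = int(f.readline().strip(), 16)   # hex length line
        if size == 0:
            return b"".join(out)               # b"0\r\n" ends the body
        out.append(f.read(size))
        assert f.read(2) == b"\r\n"            # each chunk ends with CRLF

assert decode_chunked(b"5\r\nhello\r\n0\r\n\r\n") == b"hello"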
@gen.coroutine
def _read_body_until_close(self, delegate):
body = yield self.stream.read_until_close()
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
delegate.data_received(body)
class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
"""Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
"""
def __init__(self, delegate, chunk_size):
self._delegate = delegate
self._chunk_size = chunk_size
self._decompressor = None
def headers_received(self, start_line, headers):
if headers.get("Content-Encoding") == "gzip":
self._decompressor = GzipDecompressor()
# Downstream delegates will only see uncompressed data,
# so rename the content-encoding header.
# (but note that curl_httpclient doesn't do this).
headers.add("X-Consumed-Content-Encoding",
headers["Content-Encoding"])
del headers["Content-Encoding"]
return self._delegate.headers_received(start_line, headers)
@gen.coroutine
def data_received(self, chunk):
if self._decompressor:
compressed_data = chunk
while compressed_data:
decompressed = self._decompressor.decompress(
compressed_data, self._chunk_size)
if decompressed:
ret = self._delegate.data_received(decompressed)
if ret is not None:
yield ret
compressed_data = self._decompressor.unconsumed_tail
else:
ret = self._delegate.data_received(chunk)
if ret is not None:
yield ret
def finish(self):
if self._decompressor is not None:
tail = self._decompressor.flush()
if tail:
# I believe the tail will always be empty (i.e.
# decompress will return all it can). The purpose
# of the flush call is to detect errors such
# as truncated input. But in case it ever returns
# anything, treat it as an extra chunk
self._delegate.data_received(tail)
return self._delegate.finish()
def on_connection_close(self):
return self._delegate.on_connection_close()
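# Illustrative sketch: the streaming decompression pattern used by
# _GzipMessageDelegate, written against zlib directly (GzipDecompressor
# in tornado.util wraps a zlib decompressobj in this gzip mode).
import gzip
import zlib

decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)   # 16+MAX_WBITS selects gzip
compressed = gzip.compress(b"payload " * 100)
plain = b""
for i in range(0, len(compressed), 37):            # feed arbitrary-sized slices
    plain += decomp.decompress(compressed[i:i + 37])
plain += decomp.flush()                            # also detects truncated input
assert plain == b"payload " * 100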
class HTTP1ServerConnection(object):
"""An HTTP/1.x server."""
def __init__(self, stream, params=None, context=None):
"""
:arg stream: an `.IOStream`
:arg params: a `.HTTP1ConnectionParameters` or None
:arg context: an opaque application-defined object that is accessible
as ``connection.context``
"""
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self._serving_future = None
@gen.coroutine
def close(self):
"""Closes the connection.
Returns a `.Future` that resolves after the serving loop has exited.
"""
self.stream.close()
# Block until the serving loop is done, but ignore any exceptions
# (start_serving is already responsible for logging them).
try:
yield self._serving_future
except Exception:
pass
def start_serving(self, delegate):
"""Starts serving requests on this connection.
:arg delegate: a `.HTTPServerConnectionDelegate`
"""
assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
self._serving_future = self._server_request_loop(delegate)
# Register the future on the IOLoop so its errors get logged.
self.stream.io_loop.add_future(self._serving_future,
lambda f: f.result())
@gen.coroutine
def _server_request_loop(self, delegate):
try:
while True:
conn = HTTP1Connection(self.stream, False,
self.params, self.context)
request_delegate = delegate.start_request(self, conn)
try:
ret = yield conn.read_response(request_delegate)
except (iostream.StreamClosedError,
iostream.UnsatisfiableReadError):
return
except _QuietException:
# This exception was already logged.
conn.close()
return
except Exception:
gen_log.error("Uncaught exception", exc_info=True)
conn.close()
return
if not ret:
return
yield gen.moment
finally:
delegate.on_close(self)
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client and server implementations of HTTP/1.x.
.. versionadded:: 4.0
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
from tornado.concurrent import Future
from tornado.escape import native_str, utf8
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log, app_log
from tornado import stack_context
from tornado.util import GzipDecompressor, PY3
class _QuietException(Exception):
def __init__(self):
pass
class _ExceptionLoggingContext(object):
"""Used with the ``with`` statement when calling delegate methods to
log any exceptions with the given logger. Any exceptions caught are
converted to _QuietException
"""
def __init__(self, logger):
self.logger = logger
def __enter__(self):
pass
def __exit__(self, typ, value, tb):
if value is not None:
self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
raise _QuietException
class HTTP1ConnectionParameters(object):
"""Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
"""
def __init__(self, no_keep_alive=False, chunk_size=None,
max_header_size=None, header_timeout=None, max_body_size=None,
body_timeout=None, decompress=False):
"""
:arg bool no_keep_alive: If true, always close the connection after
one request.
:arg int chunk_size: how much data to read into memory at once
:arg int max_header_size: maximum amount of data for HTTP headers
:arg float header_timeout: how long to wait for all headers (seconds)
:arg int max_body_size: maximum amount of data for body
:arg float body_timeout: how long to wait while reading body (seconds)
:arg bool decompress: if true, decode incoming
``Content-Encoding: gzip``
"""
self.no_keep_alive = no_keep_alive
self.chunk_size = chunk_size or 65536
self.max_header_size = max_header_size or 65536
self.header_timeout = header_timeout
self.max_body_size = max_body_size
self.body_timeout = body_timeout
self.decompress = decompress
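# Illustrative sketch: constructing connection parameters (all values
# below are arbitrary; unset sizes fall back to the 65536 defaults).
from tornado.http1connection import HTTP1ConnectionParameters

params = HTTP1ConnectionParameters(
    chunk_size=64 * 1024,            # read at most 64 KiB into memory at once
    max_header_size=16 * 1024,
    header_timeout=30.0,             # seconds to wait for the full header block
    max_body_size=100 * 1024 * 1024,
    body_timeout=300.0,
    decompress=True,                 # transparently decode gzip bodies
)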
class HTTP1Connection(httputil.HTTPConnection):
"""Implements the HTTP/1.x protocol.
This class can be on its own for clients, or via `HTTP1ServerConnection`
for servers.
"""
def __init__(self, stream, is_client, params=None, context=None):
"""
:arg stream: an `.IOStream`
:arg bool is_client: client or server
:arg params: a `.HTTP1ConnectionParameters` instance or ``None``
:arg context: an opaque application-defined object that can be accessed
as ``connection.context``.
"""
self.is_client = is_client
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self.no_keep_alive = params.no_keep_alive
# The body limits can be altered by the delegate, so save them
# here instead of just referencing self.params later.
self._max_body_size = (self.params.max_body_size or
self.stream.max_buffer_size)
self._body_timeout = self.params.body_timeout
# _write_finished is set to True when finish() has been called,
# i.e. there will be no more data sent. Data may still be in the
# stream's write buffer.
self._write_finished = False
# True when we have read the entire incoming body.
self._read_finished = False
# _finish_future resolves when all data has been written and flushed
# to the IOStream.
self._finish_future = Future()
# If true, the connection should be closed after this request
# (after the response has been written in the server side,
# and after it has been read in the client)
self._disconnect_on_finish = False
self._clear_callbacks()
# Save the start lines after we read or write them; they
# affect later processing (e.g. 304 responses and HEAD methods
# have content-length but no bodies)
self._request_start_line = None
self._response_start_line = None
self._request_headers = None
# True if we are writing output with chunked encoding.
self._chunking_output = None
# While reading a body with a content-length, this is the
# amount left to read.
self._expected_content_remaining = None
# A Future for our outgoing writes, returned by IOStream.write.
self._pending_write = None
def read_response(self, delegate):
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to None after the full response has
been read.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate)
@gen.coroutine
def _read_message(self, delegate):
need_delegate_close = False
try:
header_future = self.stream.read_until_regex(
b"\r?\n\r?\n",
max_bytes=self.params.max_header_size)
if self.params.header_timeout is None:
header_data = yield header_future
else:
try:
header_data = yield gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout,
header_future,
io_loop=self.stream.io_loop,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
self.close()
raise gen.Return(False)
start_line, headers = self._parse_headers(header_data)
if self.is_client:
start_line = httputil.parse_response_start_line(start_line)
self._response_start_line = start_line
else:
start_line = httputil.parse_request_start_line(start_line)
self._request_start_line = start_line
self._request_headers = headers
self._disconnect_on_finish = not self._can_keep_alive(
start_line, headers)
need_delegate_close = True
with _ExceptionLoggingContext(app_log):
header_future = delegate.headers_received(start_line, headers)
if header_future is not None:
yield header_future
if self.stream is None:
# We've been detached.
need_delegate_close = False
raise gen.Return(False)
skip_body = False
if self.is_client:
if (self._request_start_line is not None and
self._request_start_line.method == 'HEAD'):
skip_body = True
code = start_line.code
if code == 304:
# 304 responses may include the content-length header
# but do not actually have a body.
# http://tools.ietf.org/html/rfc7230#section-3.3
skip_body = True
if code >= 100 and code < 200:
# 1xx responses should never indicate the presence of
# a body.
if ('Content-Length' in headers or
'Transfer-Encoding' in headers):
raise httputil.HTTPInputError(
"Response code %d cannot have body" % code)
# TODO: client delegates will get headers_received twice
# in the case of a 100-continue. Document or change?
yield self._read_message(delegate)
else:
if (headers.get("Expect") == "100-continue" and
not self._write_finished):
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
if not skip_body:
body_future = self._read_body(
start_line.code if self.is_client else 0, headers, delegate)
if body_future is not None:
if self._body_timeout is None:
yield body_future
else:
try:
yield gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout,
body_future, self.stream.io_loop,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
gen_log.info("Timeout reading body from %s",
self.context)
self.stream.close()
raise gen.Return(False)
self._read_finished = True
if not self._write_finished or self.is_client:
need_delegate_close = False
with _ExceptionLoggingContext(app_log):
delegate.finish()
# If we're waiting for the application to produce an asynchronous
# response, and we're not detached, register a close callback
# on the stream (we didn't need one while we were reading)
if (not self._finish_future.done() and
self.stream is not None and
not self.stream.closed()):
self.stream.set_close_callback(self._on_connection_close)
yield self._finish_future
if self.is_client and self._disconnect_on_finish:
self.close()
if self.stream is None:
raise gen.Return(False)
except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s",
self.context, e)
self.close()
raise gen.Return(False)
finally:
if need_delegate_close:
with _ExceptionLoggingContext(app_log):
delegate.on_connection_close()
self._clear_callbacks()
raise gen.Return(True)
def _clear_callbacks(self):
"""Clears the callback attributes.
This allows the request handler to be garbage collected more
quickly in CPython by breaking up reference cycles.
"""
self._write_callback = None
self._write_future = None
self._close_callback = None
if self.stream is not None:
self.stream.set_close_callback(None)
def set_close_callback(self, callback):
"""Sets a callback that will be run when the connection is closed.
.. deprecated:: 4.0
Use `.HTTPMessageDelegate.on_connection_close` instead.
"""
self._close_callback = stack_context.wrap(callback)
def _on_connection_close(self):
# Note that this callback is only registered on the IOStream
# when we have finished reading the request and are waiting for
# the application to produce its response.
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
if not self._finish_future.done():
self._finish_future.set_result(None)
self._clear_callbacks()
def close(self):
if self.stream is not None:
self.stream.close()
self._clear_callbacks()
if not self._finish_future.done():
self._finish_future.set_result(None)
def detach(self):
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all further
HTTP processing. May only be called during
`.HTTPMessageDelegate.headers_received`. Intended for implementing
protocols like websockets that tunnel over an HTTP handshake.
"""
self._clear_callbacks()
stream = self.stream
self.stream = None
if not self._finish_future.done():
self._finish_future.set_result(None)
return stream
def set_body_timeout(self, timeout):
"""Sets the body timeout for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._body_timeout = timeout
def set_max_body_size(self, max_body_size):
"""Sets the body size limit for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._max_body_size = max_body_size
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Implements `.HTTPConnection.write_headers`."""
lines = []
if self.is_client:
self._request_start_line = start_line
lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
# Client requests with a non-empty body must have either a
# Content-Length or a Transfer-Encoding.
self._chunking_output = (
start_line.method in ('POST', 'PUT', 'PATCH') and
'Content-Length' not in headers and
'Transfer-Encoding' not in headers)
else:
self._response_start_line = start_line
lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
self._chunking_output = (
# TODO: should this use
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == 'HTTP/1.1' and
                # 204 and 304 responses have no body (not even a zero-length
                # body), and so should not have either Content-Length or
                # Transfer-Encoding headers.
start_line.code not in (204, 304) and
# No need to chunk the output if a Content-Length is specified.
'Content-Length' not in headers and
# Applications are discouraged from touching Transfer-Encoding,
# but if they do, leave it alone.
'Transfer-Encoding' not in headers)
# If a 1.0 client asked for keep-alive, add the header.
if (self._request_start_line.version == 'HTTP/1.0' and
(self._request_headers.get('Connection', '').lower() ==
'keep-alive')):
headers['Connection'] = 'Keep-Alive'
if self._chunking_output:
headers['Transfer-Encoding'] = 'chunked'
if (not self.is_client and
(self._request_start_line.method == 'HEAD' or
start_line.code == 304)):
self._expected_content_remaining = 0
elif 'Content-Length' in headers:
self._expected_content_remaining = int(headers['Content-Length'])
else:
self._expected_content_remaining = None
# TODO: headers are supposed to be of type str, but we still have some
# cases that let bytes slip through. Remove these native_str calls when those
# are fixed.
header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all())
if PY3:
lines.extend(l.encode('latin1') for l in header_lines)
else:
lines.extend(header_lines)
for line in lines:
if b'\n' in line:
raise ValueError('Newline in header: ' + repr(line))
future = None
if self.stream.closed():
future = self._write_future = Future()
future.set_exception(iostream.StreamClosedError())
future.exception()
else:
if callback is not None:
self._write_callback = stack_context.wrap(callback)
else:
future = self._write_future = Future()
data = b"\r\n".join(lines) + b"\r\n\r\n"
if chunk:
data += self._format_chunk(chunk)
self._pending_write = self.stream.write(data)
self._pending_write.add_done_callback(self._on_write_complete)
return future
def _format_chunk(self, chunk):
if self._expected_content_remaining is not None:
self._expected_content_remaining -= len(chunk)
if self._expected_content_remaining < 0:
# Close the stream now to stop further framing errors.
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write more data than Content-Length")
if self._chunking_output and chunk:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
else:
return chunk
def write(self, chunk, callback=None):
"""Implements `.HTTPConnection.write`.
        For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a
pre-encoded header block.
"""
future = None
if self.stream.closed():
future = self._write_future = Future()
self._write_future.set_exception(iostream.StreamClosedError())
self._write_future.exception()
else:
if callback is not None:
self._write_callback = stack_context.wrap(callback)
else:
future = self._write_future = Future()
self._pending_write = self.stream.write(self._format_chunk(chunk))
self._pending_write.add_done_callback(self._on_write_complete)
return future
def finish(self):
"""Implements `.HTTPConnection.finish`."""
if (self._expected_content_remaining is not None and
self._expected_content_remaining != 0 and
not self.stream.closed()):
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write %d bytes less than Content-Length" %
self._expected_content_remaining)
if self._chunking_output:
if not self.stream.closed():
self._pending_write = self.stream.write(b"0\r\n\r\n")
self._pending_write.add_done_callback(self._on_write_complete)
self._write_finished = True
# If the app finished the request while we're still reading,
# divert any remaining data away from the delegate and
# close the connection when we're done sending our response.
# Closing the connection is the only way to avoid reading the
# whole input body.
if not self._read_finished:
self._disconnect_on_finish = True
# No more data is coming, so instruct TCP to send any remaining
# data immediately instead of waiting for a full packet or ack.
self.stream.set_nodelay(True)
if self._pending_write is None:
self._finish_request(None)
else:
self._pending_write.add_done_callback(self._finish_request)
def _on_write_complete(self, future):
exc = future.exception()
if exc is not None and not isinstance(exc, iostream.StreamClosedError):
future.result()
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
self.stream.io_loop.add_callback(callback)
if self._write_future is not None:
future = self._write_future
self._write_future = None
future.set_result(None)
def _can_keep_alive(self, start_line, headers):
if self.params.no_keep_alive:
return False
connection_header = headers.get("Connection")
if connection_header is not None:
connection_header = connection_header.lower()
if start_line.version == "HTTP/1.1":
return connection_header != "close"
elif ("Content-Length" in headers or
headers.get("Transfer-Encoding", "").lower() == "chunked" or
getattr(start_line, 'method', None) in ("HEAD", "GET")):
            # start_line may be a request or response start line; only
# the former has a method attribute.
return connection_header == "keep-alive"
return False
def _finish_request(self, future):
self._clear_callbacks()
if not self.is_client and self._disconnect_on_finish:
self.close()
return
# Turn Nagle's algorithm back on, leaving the stream in its
# default state for the next request.
self.stream.set_nodelay(False)
if not self._finish_future.done():
self._finish_future.set_result(None)
def _parse_headers(self, data):
# The lstrip removes newlines that some implementations sometimes
# insert between messages of a reused connection. Per RFC 7230,
# we SHOULD ignore at least one empty line before the request.
# http://tools.ietf.org/html/rfc7230#section-3.5
data = native_str(data.decode('latin1')).lstrip("\r\n")
        # RFC 7230 section 3.5 allows for both CRLF and bare LF.
eol = data.find("\n")
start_line = data[:eol].rstrip("\r")
try:
headers = httputil.HTTPHeaders.parse(data[eol:])
except ValueError:
            # probably from split() if there was no ':' in the line
raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
data[eol:100])
return start_line, headers
def _read_body(self, code, headers, delegate):
if "Content-Length" in headers:
if "Transfer-Encoding" in headers:
# Response cannot contain both Content-Length and
# Transfer-Encoding headers.
# http://tools.ietf.org/html/rfc7230#section-3.3.3
raise httputil.HTTPInputError(
"Response with both Transfer-Encoding and Content-Length")
if "," in headers["Content-Length"]:
# Proxies sometimes cause Content-Length headers to get
# duplicated. If all the values are identical then we can
# use them but if they differ it's an error.
pieces = re.split(r',\s*', headers["Content-Length"])
if any(i != pieces[0] for i in pieces):
raise httputil.HTTPInputError(
"Multiple unequal Content-Lengths: %r" %
headers["Content-Length"])
headers["Content-Length"] = pieces[0]
try:
content_length = int(headers["Content-Length"])
except ValueError:
# Handles non-integer Content-Length value.
raise httputil.HTTPInputError(
"Only integer Content-Length is allowed: %s" % headers["Content-Length"])
if content_length > self._max_body_size:
raise httputil.HTTPInputError("Content-Length too long")
else:
content_length = None
if code == 204:
# This response code is not allowed to have a non-empty body,
# and has an implicit length of zero instead of read-until-close.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
if ("Transfer-Encoding" in headers or
content_length not in (None, 0)):
raise httputil.HTTPInputError(
"Response with code %d should not have body" % code)
content_length = 0
if content_length is not None:
return self._read_fixed_body(content_length, delegate)
if headers.get("Transfer-Encoding", "").lower() == "chunked":
return self._read_chunked_body(delegate)
if self.is_client:
return self._read_body_until_close(delegate)
return None
@gen.coroutine
def _read_fixed_body(self, content_length, delegate):
while content_length > 0:
body = yield self.stream.read_bytes(
min(self.params.chunk_size, content_length), partial=True)
content_length -= len(body)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(body)
if ret is not None:
yield ret
@gen.coroutine
def _read_chunked_body(self, delegate):
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
total_size = 0
while True:
chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len.strip(), 16)
if chunk_len == 0:
return
total_size += chunk_len
if total_size > self._max_body_size:
raise httputil.HTTPInputError("chunked body too large")
bytes_to_read = chunk_len
while bytes_to_read:
chunk = yield self.stream.read_bytes(
min(bytes_to_read, self.params.chunk_size), partial=True)
bytes_to_read -= len(chunk)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(chunk)
if ret is not None:
yield ret
# chunk ends with \r\n
crlf = yield self.stream.read_bytes(2)
assert crlf == b"\r\n"
@gen.coroutine
def _read_body_until_close(self, delegate):
body = yield self.stream.read_until_close()
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
delegate.data_received(body)
class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
"""Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
"""
def __init__(self, delegate, chunk_size):
self._delegate = delegate
self._chunk_size = chunk_size
self._decompressor = None
def headers_received(self, start_line, headers):
if headers.get("Content-Encoding") == "gzip":
self._decompressor = GzipDecompressor()
# Downstream delegates will only see uncompressed data,
# so rename the content-encoding header.
# (but note that curl_httpclient doesn't do this).
headers.add("X-Consumed-Content-Encoding",
headers["Content-Encoding"])
del headers["Content-Encoding"]
return self._delegate.headers_received(start_line, headers)
@gen.coroutine
def data_received(self, chunk):
if self._decompressor:
compressed_data = chunk
while compressed_data:
decompressed = self._decompressor.decompress(
compressed_data, self._chunk_size)
if decompressed:
ret = self._delegate.data_received(decompressed)
if ret is not None:
yield ret
compressed_data = self._decompressor.unconsumed_tail
else:
ret = self._delegate.data_received(chunk)
if ret is not None:
yield ret
def finish(self):
if self._decompressor is not None:
tail = self._decompressor.flush()
if tail:
# I believe the tail will always be empty (i.e.
# decompress will return all it can). The purpose
# of the flush call is to detect errors such
# as truncated input. But in case it ever returns
# anything, treat it as an extra chunk
self._delegate.data_received(tail)
return self._delegate.finish()
def on_connection_close(self):
return self._delegate.on_connection_close()
class HTTP1ServerConnection(object):
"""An HTTP/1.x server."""
def __init__(self, stream, params=None, context=None):
"""
:arg stream: an `.IOStream`
:arg params: a `.HTTP1ConnectionParameters` or None
:arg context: an opaque application-defined object that is accessible
as ``connection.context``
"""
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self._serving_future = None
@gen.coroutine
def close(self):
"""Closes the connection.
Returns a `.Future` that resolves after the serving loop has exited.
"""
self.stream.close()
# Block until the serving loop is done, but ignore any exceptions
# (start_serving is already responsible for logging them).
try:
yield self._serving_future
except Exception:
pass
def start_serving(self, delegate):
"""Starts serving requests on this connection.
:arg delegate: a `.HTTPServerConnectionDelegate`
"""
assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
self._serving_future = self._server_request_loop(delegate)
# Register the future on the IOLoop so its errors get logged.
self.stream.io_loop.add_future(self._serving_future,
lambda f: f.result())
@gen.coroutine
def _server_request_loop(self, delegate):
try:
while True:
conn = HTTP1Connection(self.stream, False,
self.params, self.context)
request_delegate = delegate.start_request(self, conn)
try:
ret = yield conn.read_response(request_delegate)
except (iostream.StreamClosedError,
iostream.UnsatisfiableReadError):
return
except _QuietException:
# This exception was already logged.
conn.close()
return
except Exception:
gen_log.error("Uncaught exception", exc_info=True)
conn.close()
return
if not ret:
return
yield gen.moment
finally:
delegate.on_close(self)
| BugsInPy/BugsInPy/temp/projects/tornado/bug-11-fixed/tornado/tornado/http1connection.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-11-buggy/tornado/tornado/http1connection.py |
tornado-bug-6 | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop.
Typical applications will use a single `IOLoop` object, accessed via
the `IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
In addition to I/O events, the `IOLoop` can also schedule time-based
events. `IOLoop.add_timeout` is a non-blocking alternative to
`time.sleep`.
"""
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
import weakref
from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import (
PY3, Configurable, errno_from_exception, timedelta_to_seconds,
TimeoutError, unicode_type, import_object,
)
try:
import signal
except ImportError:
signal = None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
if PY3:
import _thread as thread
else:
import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0
class IOLoop(Configurable):
"""A level-triggered I/O loop.
On Python 3, `IOLoop` is a wrapper around the `asyncio` event
loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD
and Mac OS X) if they are available, or else we fall back on
select(). If you are implementing a system that needs to handle
thousands of simultaneous connections, you should use a system
that supports either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import socket
import tornado.ioloop
from tornado import gen
from tornado.iostream import IOStream
@gen.coroutine
def handle_connection(connection, address):
stream = IOStream(connection)
message = yield stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# In Python 2, _current.instance points to the current IOLoop.
_current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = weakref.WeakKeyDictionary()
@classmethod
def configure(cls, impl, **kwargs):
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available")
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance():
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
def install(self):
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance():
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
"""
if asyncio is None:
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
else:
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True)
else:
current = None
return current
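# Illustrative sketch: current() is per-thread; on Python 3 the lookup is
# keyed off asyncio.get_event_loop() via _ioloop_for_asyncio above.
from tornado.ioloop import IOLoop

loop = IOLoop.current()          # creates an IOLoop for this thread if needed
assert IOLoop.current() is loop  # subsequent calls return the same instance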
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
"""
# The asyncio event loops override this method.
assert asyncio is None
old = getattr(IOLoop._current, "instance", None)
if old is not None:
old.clear_current()
IOLoop._current.instance = self
@staticmethod
def clear_current():
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self):
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
return PollIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``s`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``s`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = Future()
future_set_exc_info(future_cell[0], sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = Future()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback():
# If we can cancel the future, do so and wait on it. If not,
                # just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future_add_done_callback(
future, lambda future: self.add_callback(callback, future))
def run_in_executor(self, executor, func, *args):
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
"""
if ThreadPoolExecutor is None:
raise RuntimeError(
"concurrent.futures is required to use IOLoop.run_in_executor")
if executor is None:
if not hasattr(self, '_executor'):
from tornado.process import cpu_count
self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future()
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
def set_default_executor(self, executor):
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
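# A minimal sketch of the run_sync pattern documented above; defined
# here for illustration only and never called. The coroutine `compute`
# and its 0.1s sleep are hypothetical stand-ins for real async work.
def _example_run_sync():
    from tornado import gen
    @gen.coroutine
    def compute():
        yield gen.sleep(0.1)  # stand-in for real asynchronous work
        raise gen.Return(42)
    # Runs the loop until `compute` resolves; if 5 seconds elapse first,
    # run_sync raises tornado.util.TimeoutError as documented above.
    return IOLoop.current().run_sync(compute, timeout=5)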
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = collections.deque()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in list(self._handlers.values()):
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
if hasattr(self, '_executor'):
self._executor.shutdown()
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = IOLoop.current(instance=False)
if old_current is not self:
self.make_current()
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
ncallbacks = len(self._callbacks)
# Collect any timeouts that have come due.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for i in range(ncallbacks):
self._run_callback(self._callbacks.popleft())
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
if old_current is None:
IOLoop.clear_current()
elif old_current is not self:
old_current.make_current()
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if self._closing:
return
# Blindly insert into self._callbacks. This is safe even
# from signal handlers because deque.append is atomic.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if thread.get_ident() != self._thread_ident:
# This will write one byte but Waker.consume() reads many
# at once, so it's ok to write even when not strictly
# necessary.
self._waker.wake()
else:
# If we're on the IOLoop's thread, we don't need to wake anyone.
pass
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
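# A standalone sketch mirroring the poll_timeout selection inside
# PollIOLoop.start() above; the helper is hypothetical and unused by
# the loop itself, shown only to make the three cases explicit.
def _example_poll_timeout(has_callbacks, next_deadline, now):
    if has_callbacks:
        return 0.0  # pending callbacks: poll() must not block
    if next_deadline is not None:
        # sleep until the first timeout, clamped to [0, _POLL_TIMEOUT]
        return max(0, min(next_deadline - now, _POLL_TIMEOUT))
    return _POLL_TIMEOUT  # nothing scheduled: use the default cap
# For example, _example_poll_timeout(False, now + 0.25, now) sleeps at
# most 0.25 seconds, while _example_poll_timeout(True, None, now) is 0.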
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending timeouts
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
# Comparison methods to sort by deadline, with a per-loop counter as a
# tiebreaker to guarantee a consistent ordering (see ``tdeadline``).
# The heapq module uses __lt__ in python 2.6+ (sort() and most other
# comparisons use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
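# A self-contained sketch of the ordering property _Timeout relies on:
# ``tdeadline`` pairs the deadline with a monotonically increasing
# counter, so heapq pops equal deadlines in insertion (FIFO) order.
# Defined for illustration only and never called by the loop.
def _example_timeout_ordering():
    counter = itertools.count()
    heap = []
    for deadline in (5.0, 1.0, 5.0):
        heapq.heappush(heap, (deadline, next(counter)))
    # pops as (1.0, 1), (5.0, 0), (5.0, 2): earliest deadline first,
    # then insertion order as the tiebreaker
    return [heapq.heappop(heap) for _ in range(3)]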
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
# Looking up the IOLoop here makes it possible to first instantiate the
# PeriodicCallback in another thread, then start it using
# IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
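# A standalone restatement of the skip-ahead arithmetic in
# PeriodicCallback._schedule_next above: when the callback overruns,
# the next deadline jumps forward a whole number of periods so that
# missed invocations are skipped rather than fired in a burst. The
# helper is hypothetical and exists only for illustration.
def _example_next_timeout(next_timeout, current_time, callback_time_ms):
    period = callback_time_ms / 1000.0
    if next_timeout <= current_time:
        missed = math.floor((current_time - next_timeout) / period)
        next_timeout += (missed + 1) * period
    return next_timeout
# e.g. with a 100ms period, next_timeout=1.0 and current_time=1.25 the
# next run lands at 1.3: the 1.1 and 1.2 ticks are skipped.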
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop.
Typical applications will use a single `IOLoop` object, accessed via
the `IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
In addition to I/O events, the `IOLoop` can also schedule time-based
events. `IOLoop.add_timeout` is a non-blocking alternative to
`time.sleep`.
"""
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import (
PY3, Configurable, errno_from_exception, timedelta_to_seconds,
TimeoutError, unicode_type, import_object,
)
try:
import signal
except ImportError:
signal = None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
if PY3:
import _thread as thread
else:
import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0
class IOLoop(Configurable):
"""A level-triggered I/O loop.
On Python 3, `IOLoop` is a wrapper around the `asyncio` event
loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD
and Mac OS X) if they are available, or else we fall back on
select(). If you are implementing a system that needs to handle
thousands of simultaneous connections, you should use a system
that supports either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import socket
import tornado.ioloop
from tornado import gen
from tornado.iostream import IOStream
@gen.coroutine
def handle_connection(connection, address):
stream = IOStream(connection)
message = yield stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# In Python 2, _current.instance points to the current IOLoop.
_current = threading.local()
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict()
@classmethod
def configure(cls, impl, **kwargs):
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available")
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance():
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
def install(self):
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance():
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
"""
if asyncio is None:
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
else:
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True)
else:
current = None
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
"""
# The asyncio event loops override this method.
assert asyncio is None
old = getattr(IOLoop._current, "instance", None)
if old is not None:
old.clear_current()
IOLoop._current.instance = self
@staticmethod
def clear_current():
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self):
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
return PollIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
Changes the events we listen for on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``s`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``s`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
.. deprecated:: 5.0
Not implemented on the `asyncio` event loop. Use the environment
variable ``PYTHONASYNCIODEBUG=1`` instead.
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = Future()
future_set_exc_info(future_cell[0], sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = Future()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback():
# If we can cancel the future, do so and wait on it. If not,
# just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future_add_done_callback(
future, lambda future: self.add_callback(callback, future))
def run_in_executor(self, executor, func, *args):
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
"""
if ThreadPoolExecutor is None:
raise RuntimeError(
"concurrent.futures is required to use IOLoop.run_in_executor")
if executor is None:
if not hasattr(self, '_executor'):
from tornado.process import cpu_count
self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future()
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
def set_default_executor(self, executor):
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
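# A minimal sketch of the run_in_executor pattern documented above,
# defined for illustration only and never called; `blocking_work` is a
# hypothetical stand-in for CPU- or IO-bound code.
def _example_run_in_executor():
    from tornado import gen
    def blocking_work(x):
        time.sleep(0.01)  # placeholder for blocking work
        return x * 2
    @gen.coroutine
    def caller():
        # runs blocking_work on the default thread pool without
        # blocking the IOLoop, then resumes with its result
        result = yield IOLoop.current().run_in_executor(
            None, blocking_work, 21)
        raise gen.Return(result)
    return IOLoop.current().run_sync(caller)  # returns 42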
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = collections.deque()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in list(self._handlers.values()):
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
if hasattr(self, '_executor'):
self._executor.shutdown()
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = IOLoop.current(instance=False)
if old_current is not self:
self.make_current()
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
ncallbacks = len(self._callbacks)
# Collect any timeouts that have come due.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for i in range(ncallbacks):
self._run_callback(self._callbacks.popleft())
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
if old_current is None:
IOLoop.clear_current()
elif old_current is not self:
old_current.make_current()
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if self._closing:
return
# Blindly insert into self._callbacks. This is safe even
# from signal handlers because deque.append is atomic.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if thread.get_ident() != self._thread_ident:
# This will write one byte but Waker.consume() reads many
# at once, so it's ok to write even when not strictly
# necessary.
self._waker.wake()
else:
# If we're on the IOLoop's thread, we don't need to wake anyone.
pass
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
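# A standalone model of the lazy cancellation used by remove_timeout
# and start() above: cancelled entries stay in the heap with their
# callback set to None and are compacted only once they outnumber both
# 512 and half of the queue. The helper is hypothetical and unused.
def _example_should_compact(cancellations, heap_size):
    return cancellations > 512 and cancellations > (heap_size >> 1)
# e.g. 600 cancellations in a 1000-entry heap triggers compaction
# (600 > 512 and 600 > 500), while 600 out of 2000 entries does not.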
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending timeouts
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
# Comparison methods to sort by deadline, with a per-loop counter as a
# tiebreaker to guarantee a consistent ordering (see ``tdeadline``).
# The heapq module uses __lt__ in python 2.6+ (sort() and most other
# comparisons use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
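# A small sketch of the duck typing that split_fd and close_fd (above)
# rely on: anything with a fileno() method is kept alongside its
# descriptor, while a bare integer passes through unchanged. The
# _ExampleFile class is hypothetical and never instantiated by Tornado;
# the function is defined for illustration only and never called.
class _ExampleFile(object):
    def fileno(self):
        return 7
def _example_split_fd():
    loop = IOLoop.current()
    assert loop.split_fd(7) == (7, 7)  # raw descriptor: returned as-is
    obj = _ExampleFile()
    assert loop.split_fd(obj) == (7, obj)  # file-like: object retained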
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
# Looking up the IOLoop here allows us to first instantiate the
# PeriodicCallback in another thread, then start it using
# IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
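# A minimal usage sketch (hypothetical user code, not part of this module):
#
#     def report():
#         print("still alive")
#     pc = PeriodicCallback(report, callback_time=1000)  # every 1000 ms
#     pc.start()
#     IOLoop.current().start()
#
# The catch-up math in _schedule_next, worked through: with
# callback_time=1000 ms (1.0 s) and _next_timeout=10.0, if the callback
# finishes at current_time=12.3 then floor((12.3 - 10.0) / 1.0) + 1 == 3,
# so _next_timeout becomes 13.0; the runs that would have fired at 11.0
# and 12.0 are skipped to get back on schedule.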
| BugsInPy/BugsInPy/temp/projects/tornado/bug-6-fixed/tornado/tornado/ioloop.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-6-buggy/tornado/tornado/ioloop.py |
tornado-bug-16 | """``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import raise_exc_info
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
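A minimal sketch of the callback style (``get_user``, ``make_url`` and
``json_decode`` are placeholders, not part of this module)::
    @gen.engine
    def get_user(user_id, callback):
        response = yield AsyncHTTPClient().fetch(make_url(user_id))
        callback(json_decode(response.body))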
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
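Both calling styles, side by side (a sketch; ``divide`` and
``handle_result`` are placeholders, not part of this module)::
    result = yield divide(1, 2)            # from another coroutine
    divide(1, 2, callback=handle_result)   # callback style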
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} recieved from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (If keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
.. versionadded:: 4.1
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
self_ref = weakref.ref(self)
for future in futures:
future.add_done_callback(functools.partial(
self._done_callback, self_ref))
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
@staticmethod
def _done_callback(self_ref, done):
self = self_ref()
if self is not None:
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
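A minimal sketch of the (deprecated) pattern (``some_async_op`` is a
placeholder)::
    some_async_op(callback=(yield gen.Callback("key")))
    result = yield gen.Wait("key")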
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
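A minimal sketch (``fn``, ``arg1`` and ``arg2`` are placeholders for a
callback-style function and its positional arguments)::
    result = yield gen.Task(fn, arg1, arg2)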
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call this class directly, as it
will be created automatically as needed. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call `multi_future` explicitly,
since the engine will do so automatically when the generator
yields a list of `Futures`. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
This function is faster than the `Multi` `YieldPoint` because it
does not require the creation of a stack context.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
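A minimal sketch of the ``quiet_exceptions`` argument (``urls`` and
``HTTPError`` are placeholders; only the first failure is raised, and
further ``HTTPError`` failures are not logged)::
    responses = yield gen.multi_future(
        [AsyncHTTPClient().fetch(u) for u in urls],
        quiet_exceptions=(HTTPError,))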
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
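A minimal sketch (assumes a coroutine context; ``url`` is a placeholder)::
    response = yield gen.with_timeout(
        datetime.timedelta(seconds=5),
        AsyncHTTPClient().fetch(url))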
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
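For example (illustrative only; ``cb`` is a placeholder callback)::
    adapter = _argument_adapter(cb)
    adapter(1)       # calls cb(1)
    adapter(1, 2)    # calls cb(Arguments((1, 2), {}))
    adapter()        # calls cb(None)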
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import raise_exc_info
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} recieved from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (If keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
.. versionadded:: 4.1
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
# Use a weak reference to self to avoid cycles that may delay
# garbage collection.
self_ref = weakref.ref(self)
for future in futures:
future.add_done_callback(functools.partial(
self._done_callback, self_ref))
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
# As long as there is an active _running_future, we must
# ensure that the WaitIterator is not GC'd (due to the
# use of weak references in __init__). Add a callback that
# references self so there is a hard reference that will be
# cleared automatically when this Future finishes.
self._running_future.add_done_callback(lambda f: self)
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
@staticmethod
def _done_callback(self_ref, done):
self = self_ref()
if self is not None:
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
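A minimal sketch of the (deprecated) pattern (``op1`` and ``op2`` are
placeholders for callback-style operations)::
    op1(callback=(yield gen.Callback("k1")))
    op2(callback=(yield gen.Callback("k2")))
    result1, result2 = yield gen.WaitAll(["k1", "k2"])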
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call this class directly, as it
will be created automatically as needed. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
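        # Note: takewhile stops at the first child that reports not-ready,
        # so ready children that come after it remain in unfinished_children
        # until a later call; the result is still correct once the set
        # eventually empties.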
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call `multi_future` explicitly,
since the engine will do so automatically when the generator
yields a list of `Futures`. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
This function is faster than the `Multi` `YieldPoint` because it
does not require the creation of a stack context.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
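# Editor's sketch: how `_argument_adapter` normalizes the three call shapes.
# Zero arguments become None, exactly one argument passes through unchanged,
# and anything else is packed into an `Arguments` namedtuple.
def _example_argument_adapter():
    received = []
    adapted = _argument_adapter(received.append)
    adapted()                   # appends None
    adapted(42)                 # appends 42
    adapted(1, 2, key="value")  # appends Arguments((1, 2), {'key': 'value'})
    return received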
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
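# Editor's sketch: `convert_yielded` on a list delegates to `multi_future`,
# producing a single Future whose result is the list of individual results.
def _example_convert_yielded():
    f1, f2 = Future(), Future()
    combined = convert_yielded([f1, f2])
    f1.set_result(1)
    f2.set_result(2)
    return combined.result()  # [1, 2]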
| BugsInPy/BugsInPy/temp/projects/tornado/bug-16-fixed/tornado/tornado/gen.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-16-buggy/tornado/tornado/gen.py |
tornado-bug-14 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
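            # This check is inverted: it raises "already exists" precisely
            # when no current IOLoop exists. Compare the second copy of
            # this file below, which tests `is not None`.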
if IOLoop.current(instance=False) is None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None and is_future(ret):
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
self.add_future(ret, lambda f: f.result())
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
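# Editor's usage sketch (not part of the original source): the two deadline
# forms accepted by `add_timeout`, plus `call_later` for relative delays.
# It is safe to cancel a handle even after its callback has already run.
def _example_timeouts():
    io_loop = IOLoop.current()
    # Absolute deadline on the IOLoop's clock:
    h1 = io_loop.add_timeout(io_loop.time() + 0.5, io_loop.stop)
    # Relative deadline via datetime.timedelta:
    h2 = io_loop.add_timeout(datetime.timedelta(milliseconds=500), io_loop.stop)
    # Sugar for the relative case:
    h3 = io_loop.call_later(0.5, io_loop.stop)
    for handle in (h1, h2, h3):
        io_loop.remove_timeout(handle)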
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
                # Collect any timeouts that have come due into due_timeouts.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
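# Editor's sketch: `add_callback` is the one method with a thread-safety
# guarantee, so a worker thread hands its result back to the loop's thread
# through it. Uses the module-level `threading` import; illustrative only.
def _example_add_callback_from_thread():
    io_loop = IOLoop.current()
    def worker():
        value = 6 * 7  # stand-in for blocking work done off the loop
        io_loop.add_callback(lambda: (print(value), io_loop.stop()))
    threading.Thread(target=worker).start()
    io_loop.start()  # runs until the worker's callback calls stop()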
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
    # Comparison methods to sort by deadline, with a per-loop counter as a
    # tiebreaker to guarantee a consistent ordering. The heapq module uses
    # __le__ in python 2.5, and __lt__ in 2.6+ (sort() and most other
    # comparisons use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
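# Editor's sketch: `_Timeout` orders by (deadline, creation counter), so a
# heap pops the earliest deadline first and breaks ties first-in-first-out.
def _example_timeout_ordering():
    io_loop = IOLoop.current()
    first = _Timeout(5.0, lambda: None, io_loop)
    second = _Timeout(5.0, lambda: None, io_loop)
    early = _Timeout(1.0, lambda: None, io_loop)
    heap = [second, first, early]
    heapq.heapify(heap)
    assert heapq.heappop(heap) is early   # smallest deadline wins
    assert heapq.heappop(heap) is first   # tie broken by creation order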
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
                self._next_timeout += (
                    math.floor((current_time - self._next_timeout) /
                               callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
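# Editor's usage sketch: a PeriodicCallback that stops itself and the loop
# after three ticks. `callback_time` is in milliseconds, and ticks that
# overrun the interval are skipped, not queued (see _schedule_next above).
def _example_periodic_callback():
    io_loop = IOLoop.current()
    counts = []
    def tick():
        counts.append(len(counts))
        if len(counts) >= 3:
            periodic.stop()
            io_loop.stop()
    periodic = PeriodicCallback(tick, 50)  # fire every 50 milliseconds
    periodic.start()
    io_loop.start()
    return counts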
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None and is_future(ret):
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
self.add_future(ret, lambda f: f.result())
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
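# Illustrative sketch, not part of the original file: what split_fd
# returns for the two input styles add_handler accepts. The throwaway
# socket is a hypothetical stand-in for any object with fileno().
def _example_split_fd():
    import socket
    loop = IOLoop.current()
    sock = socket.socket()
    fd, obj = loop.split_fd(sock)
    assert fd == sock.fileno() and obj is sock  # file-like object retained
    fd, obj = loop.split_fd(fd)
    assert obj == fd                            # raw descriptor passed through
    sock.close()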
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
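# A minimal sketch, not in the original module, of the one thread-safety
# guarantee documented above: add_callback may be called from any thread,
# so a worker thread can hand a result to the loop. Names are hypothetical.
def _example_add_callback_from_thread():
    import threading
    loop = IOLoop.current()

    def deliver(result):
        print("got", result)            # runs on the IOLoop's thread
        loop.stop()

    def worker():
        loop.add_callback(deliver, 42)  # the only thread-safe entry point

    threading.Thread(target=worker).start()
    loop.start()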
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
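# Sketch, not part of the original file: the (deadline, tiebreaker) key
# means timeouts with equal deadlines pop in insertion order, which a
# plain object comparison would not guarantee.
def _example_timeout_ordering():
    import heapq
    loop = IOLoop.current()
    first = _Timeout(1.0, lambda: None, loop)
    second = _Timeout(1.0, lambda: None, loop)
    heap = []
    heapq.heappush(heap, second)
    heapq.heappush(heap, first)
    assert heapq.heappop(heap) is first  # lower tiebreaker wins the tie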
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) / callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
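# Worked sketch, not in the original file, of _schedule_next's catch-up
# arithmetic: an overrunning callback skips whole periods instead of
# firing back-to-back. The sample numbers are hypothetical.
def _example_periodic_catchup():
    import math
    next_timeout, current_time = 0.0, 0.25  # callback overran by 2.5 periods
    period = 100 / 1000.0                   # callback_time of 100ms, in seconds
    next_timeout += (math.floor((current_time - next_timeout) / period)
                     + 1) * period
    assert abs(next_timeout - 0.3) < 1e-9   # the 0.1 and 0.2 firings are skipped
    return next_timeout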
| BugsInPy/BugsInPy/temp/projects/tornado/bug-14-fixed/tornado/tornado/ioloop.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-14-buggy/tornado/tornado/ioloop.py |
tornado-bug-9 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HTTP utility code shared by clients and servers.
This module also defines the `HTTPServerRequest` class which is exposed
via `tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, print_function
import calendar
import collections
import copy
import datetime
import email.utils
import numbers
import re
import time
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import ObjectDict, PY3
if PY3:
import http.cookies as Cookie
from http.client import responses
from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
else:
import Cookie
from httplib import responses
from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qsl
# responses is unused in this file, but we re-export it to other files.
# Reference it so pyflakes doesn't complain.
responses
try:
from ssl import SSLError
except ImportError:
# ssl is unavailable on app engine.
class _SSLError(Exception):
pass
# Hack around a mypy limitation. We can't simply put "type: ignore"
# on the class definition itself; must go through an assignment.
SSLError = _SSLError # type: ignore
try:
import typing
except ImportError:
pass
# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line
# terminator and ignore any preceding CR.
_CRLF_RE = re.compile(r'\r?\n')
class _NormalizedHeaderCache(dict):
"""Dynamic cached mapping of header names to Http-Header-Case.
Implemented as a dict subclass so that cache hits are as fast as a
normal dict lookup, without the overhead of a python function
call.
>>> normalized_headers = _NormalizedHeaderCache(10)
>>> normalized_headers["coNtent-TYPE"]
'Content-Type'
"""
def __init__(self, size):
super(_NormalizedHeaderCache, self).__init__()
self.size = size
self.queue = collections.deque()
def __missing__(self, key):
normalized = "-".join([w.capitalize() for w in key.split("-")])
self[key] = normalized
self.queue.append(key)
if len(self.queue) > self.size:
# Limit the size of the cache. LRU would be better, but this
# simpler approach should be fine. In Python 2.7+ we could
# use OrderedDict (or in 3.2+, @functools.lru_cache).
old_key = self.queue.popleft()
del self[old_key]
return normalized
_normalized_headers = _NormalizedHeaderCache(1000)
class HTTPHeaders(collections.MutableMapping):
"""A dictionary that maintains ``Http-Header-Case`` for all keys.
Supports multiple values per key via a pair of new methods,
`add()` and `get_list()`. The regular dictionary interface
returns a single value per key, with multiple values joined by a
comma.
>>> h = HTTPHeaders({"content-type": "text/html"})
>>> list(h.keys())
['Content-Type']
>>> h["Content-Type"]
'text/html'
>>> h.add("Set-Cookie", "A=B")
>>> h.add("Set-Cookie", "C=D")
>>> h["set-cookie"]
'A=B,C=D'
>>> h.get_list("set-cookie")
['A=B', 'C=D']
>>> for (k,v) in sorted(h.get_all()):
... print('%s: %s' % (k,v))
...
Content-Type: text/html
Set-Cookie: A=B
Set-Cookie: C=D
"""
def __init__(self, *args, **kwargs):
self._dict = {} # type: typing.Dict[str, str]
self._as_list = {} # type: typing.Dict[str, typing.List[str]]
self._last_key = None
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], HTTPHeaders)):
# Copy constructor
for k, v in args[0].get_all():
self.add(k, v)
else:
# Dict-style initialization
self.update(*args, **kwargs)
# new public methods
def add(self, name, value):
# type: (str, str) -> None
"""Adds a new value for the given key."""
norm_name = _normalized_headers[name]
self._last_key = norm_name
if norm_name in self:
self._dict[norm_name] = (native_str(self[norm_name]) + ',' +
native_str(value))
self._as_list[norm_name].append(value)
else:
self[norm_name] = value
def get_list(self, name):
"""Returns all values for the given header as a list."""
norm_name = _normalized_headers[name]
return self._as_list.get(norm_name, [])
def get_all(self):
# type: () -> typing.Iterable[typing.Tuple[str, str]]
"""Returns an iterable of all (name, value) pairs.
If a header has multiple values, multiple pairs will be
returned with the same name.
"""
for name, values in self._as_list.items():
for value in values:
yield (name, value)
def parse_line(self, line):
"""Updates the dictionary with a single header line.
>>> h = HTTPHeaders()
>>> h.parse_line("Content-Type: text/html")
>>> h.get('content-type')
'text/html'
"""
if line[0].isspace():
# continuation of a multi-line header
new_part = ' ' + line.lstrip()
self._as_list[self._last_key][-1] += new_part
self._dict[self._last_key] += new_part
else:
name, value = line.split(":", 1)
self.add(name, value.strip())
@classmethod
def parse(cls, headers):
"""Returns a dictionary from HTTP header text.
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
"""
h = cls()
for line in _CRLF_RE.split(headers):
if line:
h.parse_line(line)
return h
# MutableMapping abstract method implementations.
def __setitem__(self, name, value):
norm_name = _normalized_headers[name]
self._dict[norm_name] = value
self._as_list[norm_name] = [value]
def __getitem__(self, name):
# type: (str) -> str
return self._dict[_normalized_headers[name]]
def __delitem__(self, name):
norm_name = _normalized_headers[name]
del self._dict[norm_name]
del self._as_list[norm_name]
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def copy(self):
# defined in dict but not in MutableMapping.
return HTTPHeaders(self)
# Use our overridden copy method for the copy.copy module.
# This makes shallow copies one level deeper, but preserves
# the appearance that HTTPHeaders is a single container.
__copy__ = copy
def __str__(self):
lines = []
for name, value in self.get_all():
lines.append("%s: %s\n" % (name, value))
return "".join(lines)
__unicode__ = __str__
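# Sketch, not part of the original file: HTTPHeaders round-trips header
# text through parse() and str(), keeping one line per repeated value.
def _example_headers_roundtrip():
    h = HTTPHeaders.parse("Set-Cookie: a=b\r\nSet-Cookie: c=d\r\n")
    assert h.get_list("set-cookie") == ["a=b", "c=d"]
    assert h["Set-Cookie"] == "a=b,c=d"  # dict view joins with commas
    assert str(h) == "Set-Cookie: a=b\nSet-Cookie: c=d\n"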
class HTTPServerRequest(object):
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`.HTTPHeaders` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
.. versionchanged:: 3.1
The list format of ``X-Forwarded-For`` is now supported.
.. attribute:: protocol
The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
maps arguments names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`.RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: query_arguments
Same format as ``arguments``, but contains only arguments extracted
from the query string.
.. versionadded:: 3.2
.. attribute:: body_arguments
Same format as ``arguments``, but contains only arguments extracted
from the request body.
.. versionadded:: 3.2
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of `.HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
.. versionchanged:: 4.0
Moved from ``tornado.httpserver.HTTPRequest``.
"""
def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
body=None, host=None, files=None, connection=None,
start_line=None, server_connection=None):
if start_line is not None:
method, uri, version = start_line
self.method = method
self.uri = uri
self.version = version
self.headers = headers or HTTPHeaders()
self.body = body or b""
# set remote IP and protocol
context = getattr(connection, 'context', None)
self.remote_ip = getattr(context, 'remote_ip', None)
self.protocol = getattr(context, 'protocol', "http")
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.host_name = split_host_and_port(self.host.lower())[0]
self.files = files or {}
self.connection = connection
self.server_connection = server_connection
self._start_time = time.time()
self._finish_time = None
self.path, sep, self.query = uri.partition('?')
self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
self.query_arguments = copy.deepcopy(self.arguments)
self.body_arguments = {}
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics.
.. deprecated:: 4.0
Applications are less likely to need this information with the
introduction of `.HTTPConnection`. If you still need it, access
the ``version`` attribute directly.
"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
parsed = parse_cookie(self.headers["Cookie"])
except Exception:
pass
else:
for k, v in parsed.items():
try:
self._cookies[k] = v
except Exception:
# SimpleCookie imposes some restrictions on keys;
# parse_cookie does not. Discard any cookies
# with disallowed keys.
pass
return self._cookies
def write(self, chunk, callback=None):
"""Writes the given chunk to the response stream.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
assert isinstance(chunk, bytes)
assert self.version.startswith("HTTP/1."), \
"deprecated interface only supported in HTTP/1.x"
self.connection.write(chunk, callback=callback)
def finish(self):
"""Finishes this HTTP request on the open connection.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self, binary_form=False):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer's
`ssl.SSLContext.verify_mode` field must be set, e.g.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain("foo.crt", "foo.key")
ssl_ctx.load_verify_locations("cacerts.pem")
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
server = HTTPServer(app, ssl_options=ssl_ctx)
By default, the return value is a dictionary (or None, if no
client certificate is present). If ``binary_form`` is true, a
DER-encoded form of the certificate is returned instead. See
SSLSocket.getpeercert() in the standard library for more
details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert(
binary_form=binary_form)
except SSLError:
return None
def _parse_body(self):
parse_body_arguments(
self.headers.get("Content-Type", ""), self.body,
self.body_arguments, self.files,
self.headers)
for k, v in self.body_arguments.items():
self.arguments.setdefault(k, []).extend(v)
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
class HTTPInputError(Exception):
"""Exception class for malformed HTTP requests or responses
from remote sources.
.. versionadded:: 4.0
"""
pass
class HTTPOutputError(Exception):
"""Exception class for errors in HTTP output.
.. versionadded:: 4.0
"""
pass
class HTTPServerConnectionDelegate(object):
"""Implement this interface to handle requests from `.HTTPServer`.
.. versionadded:: 4.0
"""
def start_request(self, server_conn, request_conn):
"""This method is called by the server when a new request has started.
:arg server_conn: is an opaque object representing the long-lived
(e.g. tcp-level) connection.
:arg request_conn: is a `.HTTPConnection` object for a single
request/response exchange.
This method should return a `.HTTPMessageDelegate`.
"""
raise NotImplementedError()
def on_close(self, server_conn):
"""This method is called when a connection has been closed.
:arg server_conn: is a server connection that has previously been
passed to ``start_request``.
"""
pass
class HTTPMessageDelegate(object):
"""Implement this interface to handle an HTTP request or response.
.. versionadded:: 4.0
"""
def headers_received(self, start_line, headers):
"""Called when the HTTP headers have been received and parsed.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
depending on whether this is a client or server message.
:arg headers: a `.HTTPHeaders` instance.
Some `.HTTPConnection` methods can only be called during
``headers_received``.
May return a `.Future`; if it does the body will not be read
until it is done.
"""
pass
def data_received(self, chunk):
"""Called when a chunk of data has been received.
May return a `.Future` for flow control.
"""
pass
def finish(self):
"""Called after the last chunk of data has been received."""
pass
def on_connection_close(self):
"""Called if the connection is closed without finishing the request.
If ``headers_received`` is called, either ``finish`` or
``on_connection_close`` will be called, but not both.
"""
pass
class HTTPConnection(object):
"""Applications use this interface to write their responses.
.. versionadded:: 4.0
"""
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Write an HTTP header block.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
:arg headers: a `.HTTPHeaders` instance.
:arg chunk: the first (optional) chunk of data. This is an optimization
so that small responses can be written in the same call as their
headers.
:arg callback: a callback to be run when the write is complete.
The ``version`` field of ``start_line`` is ignored.
Returns a `.Future` if no callback is given.
"""
raise NotImplementedError()
def write(self, chunk, callback=None):
"""Writes a chunk of body data.
The callback will be run when the write is complete. If no callback
is given, returns a Future.
"""
raise NotImplementedError()
def finish(self):
"""Indicates that the last body data has been written.
"""
raise NotImplementedError()
def url_concat(url, args):
"""Concatenate url and arguments regardless of whether
url has existing query parameters.
``args`` may be either a dictionary or a list of key-value pairs
(the latter allows for multiple values with the same key).
>>> url_concat("http://example.com/foo", dict(c="d"))
'http://example.com/foo?c=d'
>>> url_concat("http://example.com/foo?a=b", dict(c="d"))
'http://example.com/foo?a=b&c=d'
>>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
'http://example.com/foo?a=b&c=d&c=d2'
"""
parsed_url = urlparse(url)
if isinstance(args, dict):
parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
parsed_query.extend(args.items())
elif isinstance(args, list) or isinstance(args, tuple):
parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
parsed_query.extend(args)
else:
err = "'args' parameter should be dict, list or tuple. Not {0}".format(
type(args))
raise TypeError(err)
final_query = urlencode(parsed_query)
url = urlunparse((
parsed_url[0],
parsed_url[1],
parsed_url[2],
parsed_url[3],
final_query,
parsed_url[5]))
return url
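# Sketch, not in the original file: this copy of url_concat has no guard
# for args=None, so None falls through to the TypeError branch; the
# variant later in this record short-circuits with ``if args is None``.
def _example_url_concat_none():
    try:
        url_concat("http://example.com/foo?a=b", None)
    except TypeError:
        return "unguarded"  # behavior of this copy
    return "guarded"        # behavior of the copy that returns url unchanged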
class HTTPFile(ObjectDict):
"""Represents a file uploaded via a form.
For backwards compatibility, its instance attributes are also
accessible as dictionary keys.
* ``filename``
* ``body``
* ``content_type``
"""
pass
def _parse_request_range(range_header):
"""Parses a Range header.
Returns either ``None`` or tuple ``(start, end)``.
Note that while the HTTP headers use inclusive byte positions,
this method returns indexes suitable for use in slices.
>>> start, end = _parse_request_range("bytes=1-2")
>>> start, end
(1, 3)
>>> [0, 1, 2, 3, 4][start:end]
[1, 2]
>>> _parse_request_range("bytes=6-")
(6, None)
>>> _parse_request_range("bytes=-6")
(-6, None)
>>> _parse_request_range("bytes=-0")
(None, 0)
>>> _parse_request_range("bytes=")
(None, None)
>>> _parse_request_range("foo=42")
>>> _parse_request_range("bytes=1-2,6-10")
Note: only supports one range (e.g., ``bytes=1-2,6-10`` is not allowed).
See [0] for the details of the range header.
[0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
"""
unit, _, value = range_header.partition("=")
unit, value = unit.strip(), value.strip()
if unit != "bytes":
return None
start_b, _, end_b = value.partition("-")
try:
start = _int_or_none(start_b)
end = _int_or_none(end_b)
except ValueError:
return None
if end is not None:
if start is None:
if end != 0:
start = -end
end = None
else:
end += 1
return (start, end)
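# Sketch, not part of the original file: the (start, end) pair is
# slice-ready, including the negative start produced by suffix ranges.
def _example_apply_range(body=b"0123456789"):
    start, end = _parse_request_range("bytes=-3")
    assert body[start:end] == b"789"  # start=-3, end=None
    start, end = _parse_request_range("bytes=1-2")
    assert body[start:end] == b"12"   # inclusive header -> half-open slice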
def _get_content_range(start, end, total):
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return "bytes %s-%s/%s" % (start, end, total)
def _int_or_none(val):
val = val.strip()
if val == "":
return None
return int(val)
def parse_body_arguments(content_type, body, arguments, files, headers=None):
"""Parses a form request body.
Supports ``application/x-www-form-urlencoded`` and
``multipart/form-data``. The ``content_type`` parameter should be
a string and ``body`` should be a byte string. The ``arguments``
and ``files`` parameters are dictionaries that will be updated
with the parsed contents.
"""
if headers and 'Content-Encoding' in headers:
gen_log.warning("Unsupported Content-Encoding: %s",
headers['Content-Encoding'])
return
if content_type.startswith("application/x-www-form-urlencoded"):
try:
uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
except Exception as e:
gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
uri_arguments = {}
for name, values in uri_arguments.items():
if values:
arguments.setdefault(name, []).extend(values)
elif content_type.startswith("multipart/form-data"):
try:
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
parse_multipart_form_data(utf8(v), body, arguments, files)
break
else:
raise ValueError("multipart boundary not found")
except Exception as e:
gen_log.warning("Invalid multipart/form-data: %s", e)
def parse_multipart_form_data(boundary, data, arguments, files):
"""Parses a ``multipart/form-data`` body.
The ``boundary`` and ``data`` parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b'"') and boundary.endswith(b'"'):
boundary = boundary[1:-1]
final_boundary_index = data.rfind(b"--" + boundary + b"--")
if final_boundary_index == -1:
gen_log.warning("Invalid multipart/form-data: no final boundary")
return
parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
for part in parts:
if not part:
continue
eoh = part.find(b"\r\n\r\n")
if eoh == -1:
gen_log.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b"\r\n"):
gen_log.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
gen_log.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(HTTPFile( # type: ignore
filename=disp_params["filename"], body=value,
content_type=ctype))
else:
arguments.setdefault(name, []).append(value)
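# Sketch, not in the original file: a minimal two-part body exercising
# both the plain-argument branch and the file branch of the parser above.
def _example_parse_multipart():
    boundary = b"xyz"
    body = (b"--xyz\r\n"
            b'Content-Disposition: form-data; name="field"\r\n\r\n'
            b"value\r\n"
            b"--xyz\r\n"
            b'Content-Disposition: form-data; name="up"; filename="a.txt"\r\n'
            b"Content-Type: text/plain\r\n\r\n"
            b"data\r\n"
            b"--xyz--\r\n")
    arguments, files = {}, {}
    parse_multipart_form_data(boundary, body, arguments, files)
    assert arguments["field"] == [b"value"]
    assert files["up"][0].filename == "a.txt"
    assert files["up"][0].body == b"data"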
def format_timestamp(ts):
"""Formats a timestamp in the format used by HTTP.
The argument may be a numeric timestamp as returned by `time.time`,
a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
object.
>>> format_timestamp(1359312200)
'Sun, 27 Jan 2013 18:43:20 GMT'
"""
if isinstance(ts, numbers.Real):
pass
elif isinstance(ts, (tuple, time.struct_time)):
ts = calendar.timegm(ts)
elif isinstance(ts, datetime.datetime):
ts = calendar.timegm(ts.utctimetuple())
else:
raise TypeError("unknown timestamp type: %r" % ts)
return email.utils.formatdate(ts, usegmt=True)
RequestStartLine = collections.namedtuple(
'RequestStartLine', ['method', 'path', 'version'])
def parse_request_start_line(line):
"""Returns a (method, path, version) tuple for an HTTP 1.x request line.
The response is a `collections.namedtuple`.
>>> parse_request_start_line("GET /foo HTTP/1.1")
RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
"""
try:
method, path, version = line.split(" ")
except ValueError:
raise HTTPInputError("Malformed HTTP request line")
if not re.match(r"^HTTP/1\.[0-9]$", version):
raise HTTPInputError(
"Malformed HTTP version in HTTP Request-Line: %r" % version)
return RequestStartLine(method, path, version)
ResponseStartLine = collections.namedtuple(
'ResponseStartLine', ['version', 'code', 'reason'])
def parse_response_start_line(line):
"""Returns a (version, code, reason) tuple for an HTTP 1.x response line.
The response is a `collections.namedtuple`.
>>> parse_response_start_line("HTTP/1.1 200 OK")
ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
"""
line = native_str(line)
match = re.match("(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)", line)
if not match:
raise HTTPInputError("Error parsing response start line")
return ResponseStartLine(match.group(1), int(match.group(2)),
match.group(3))
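# Sketch, not part of the original file: both start-line parsers return
# namedtuples, so fields unpack positionally or by name.
def _example_start_lines():
    req = parse_request_start_line("GET /foo HTTP/1.1")
    assert (req.method, req.path) == ("GET", "/foo")
    resp = parse_response_start_line("HTTP/1.1 404 Not Found")
    assert (resp.code, resp.reason) == (404, "Not Found")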
# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
# It has also been modified to support valueless parameters as seen in
# websocket extension negotiations.
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def _parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
else:
pdict[p] = None
return key, pdict
def _encode_header(key, pdict):
"""Inverse of _parse_header.
>>> _encode_header('permessage-deflate',
... {'client_max_window_bits': 15, 'client_no_context_takeover': None})
'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
"""
if not pdict:
return key
out = [key]
# Sort the parameters just to make it easy to test.
for k, v in sorted(pdict.items()):
if v is None:
out.append(k)
else:
# TODO: quote if necessary.
out.append('%s=%s' % (k, v))
return '; '.join(out)
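# Sketch, not in the original file: _parse_header and _encode_header
# round-trip a parameterized header, including the valueless-parameter
# form used in websocket extension negotiation.
def _example_header_params():
    key, pdict = _parse_header('form-data; name="files"; x')
    assert key == "form-data"
    assert pdict == {"name": "files", "x": None}
    assert _encode_header("form-data", pdict) == "form-data; name=files; x"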
def doctests():
import doctest
return doctest.DocTestSuite()
def split_host_and_port(netloc):
"""Returns ``(host, port)`` tuple from ``netloc``.
Returned ``port`` will be ``None`` if not present.
.. versionadded:: 4.1
"""
match = re.match(r'^(.+):(\d+)$', netloc)
if match:
host = match.group(1)
port = int(match.group(2))
else:
host = netloc
port = None
return (host, port)
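# Sketch, not part of the original file: the port is parsed out when
# present and left as None otherwise.
def _example_split_host_and_port():
    assert split_host_and_port("example.com:8080") == ("example.com", 8080)
    assert split_host_and_port("example.com") == ("example.com", None)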
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
_nulljoin = ''.join
def _unquote_cookie(str):
"""Handle double quotes and escaping in cookie values.
This method is copied verbatim from the Python 3.5 standard
library (http.cookies._unquote) so we don't have to depend on
non-public interfaces.
"""
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if str is None or len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k + 1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j + 1:j + 4], 8)))
i = j + 4
return _nulljoin(res)
def parse_cookie(cookie):
"""Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
This function attempts to mimic browser cookie parsing behavior;
it specifically does not follow any of the cookie-related RFCs
(because browsers don't either).
The algorithm used is identical to that used by Django version 1.9.10.
.. versionadded:: 4.4.2
"""
cookiedict = {}
for chunk in cookie.split(str(';')):
if str('=') in chunk:
key, val = chunk.split(str('='), 1)
else:
# Assume an empty name per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
key, val = str(''), chunk
key, val = key.strip(), val.strip()
if key or val:
# unquote using Python's algorithm.
cookiedict[key] = _unquote_cookie(val)
return cookiedict
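# Sketch, not in the original file: parse_cookie is deliberately lenient;
# doublequoted values are unquoted and a nameless chunk keeps the
# empty-string key, mirroring browser behavior.
def _example_parse_cookie():
    parsed = parse_cookie('a="b c"; lonely')
    assert parsed["a"] == "b c"
    assert parsed[""] == "lonely"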
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HTTP utility code shared by clients and servers.
This module also defines the `HTTPServerRequest` class which is exposed
via `tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, print_function
import calendar
import collections
import copy
import datetime
import email.utils
import numbers
import re
import time
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import ObjectDict, PY3
if PY3:
import http.cookies as Cookie
from http.client import responses
from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
else:
import Cookie
from httplib import responses
from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qsl
# responses is unused in this file, but we re-export it to other files.
# Reference it so pyflakes doesn't complain.
responses
try:
from ssl import SSLError
except ImportError:
# ssl is unavailable on app engine.
class _SSLError(Exception):
pass
# Hack around a mypy limitation. We can't simply put "type: ignore"
# on the class definition itself; must go through an assignment.
SSLError = _SSLError # type: ignore
try:
import typing
except ImportError:
pass
# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line
# terminator and ignore any preceding CR.
_CRLF_RE = re.compile(r'\r?\n')
class _NormalizedHeaderCache(dict):
"""Dynamic cached mapping of header names to Http-Header-Case.
Implemented as a dict subclass so that cache hits are as fast as a
normal dict lookup, without the overhead of a python function
call.
>>> normalized_headers = _NormalizedHeaderCache(10)
>>> normalized_headers["coNtent-TYPE"]
'Content-Type'
"""
def __init__(self, size):
super(_NormalizedHeaderCache, self).__init__()
self.size = size
self.queue = collections.deque()
def __missing__(self, key):
normalized = "-".join([w.capitalize() for w in key.split("-")])
self[key] = normalized
self.queue.append(key)
if len(self.queue) > self.size:
# Limit the size of the cache. LRU would be better, but this
# simpler approach should be fine. In Python 2.7+ we could
# use OrderedDict (or in 3.2+, @functools.lru_cache).
old_key = self.queue.popleft()
del self[old_key]
return normalized
_normalized_headers = _NormalizedHeaderCache(1000)
class HTTPHeaders(collections.MutableMapping):
"""A dictionary that maintains ``Http-Header-Case`` for all keys.
Supports multiple values per key via a pair of new methods,
`add()` and `get_list()`. The regular dictionary interface
returns a single value per key, with multiple values joined by a
comma.
>>> h = HTTPHeaders({"content-type": "text/html"})
>>> list(h.keys())
['Content-Type']
>>> h["Content-Type"]
'text/html'
>>> h.add("Set-Cookie", "A=B")
>>> h.add("Set-Cookie", "C=D")
>>> h["set-cookie"]
'A=B,C=D'
>>> h.get_list("set-cookie")
['A=B', 'C=D']
>>> for (k,v) in sorted(h.get_all()):
... print('%s: %s' % (k,v))
...
Content-Type: text/html
Set-Cookie: A=B
Set-Cookie: C=D
"""
def __init__(self, *args, **kwargs):
self._dict = {} # type: typing.Dict[str, str]
self._as_list = {} # type: typing.Dict[str, typing.List[str]]
self._last_key = None
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], HTTPHeaders)):
# Copy constructor
for k, v in args[0].get_all():
self.add(k, v)
else:
# Dict-style initialization
self.update(*args, **kwargs)
# new public methods
def add(self, name, value):
# type: (str, str) -> None
"""Adds a new value for the given key."""
norm_name = _normalized_headers[name]
self._last_key = norm_name
if norm_name in self:
self._dict[norm_name] = (native_str(self[norm_name]) + ',' +
native_str(value))
self._as_list[norm_name].append(value)
else:
self[norm_name] = value
def get_list(self, name):
"""Returns all values for the given header as a list."""
norm_name = _normalized_headers[name]
return self._as_list.get(norm_name, [])
def get_all(self):
# type: () -> typing.Iterable[typing.Tuple[str, str]]
"""Returns an iterable of all (name, value) pairs.
If a header has multiple values, multiple pairs will be
returned with the same name.
"""
for name, values in self._as_list.items():
for value in values:
yield (name, value)
def parse_line(self, line):
"""Updates the dictionary with a single header line.
>>> h = HTTPHeaders()
>>> h.parse_line("Content-Type: text/html")
>>> h.get('content-type')
'text/html'
"""
if line[0].isspace():
# continuation of a multi-line header
new_part = ' ' + line.lstrip()
self._as_list[self._last_key][-1] += new_part
self._dict[self._last_key] += new_part
else:
name, value = line.split(":", 1)
self.add(name, value.strip())
@classmethod
def parse(cls, headers):
"""Returns a dictionary from HTTP header text.
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
"""
h = cls()
for line in _CRLF_RE.split(headers):
if line:
h.parse_line(line)
return h
# MutableMapping abstract method implementations.
def __setitem__(self, name, value):
norm_name = _normalized_headers[name]
self._dict[norm_name] = value
self._as_list[norm_name] = [value]
def __getitem__(self, name):
# type: (str) -> str
return self._dict[_normalized_headers[name]]
def __delitem__(self, name):
norm_name = _normalized_headers[name]
del self._dict[norm_name]
del self._as_list[norm_name]
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def copy(self):
# defined in dict but not in MutableMapping.
return HTTPHeaders(self)
# Use our overridden copy method for the copy.copy module.
# This makes shallow copies one level deeper, but preserves
# the appearance that HTTPHeaders is a single container.
__copy__ = copy
def __str__(self):
lines = []
for name, value in self.get_all():
lines.append("%s: %s\n" % (name, value))
return "".join(lines)
__unicode__ = __str__
class HTTPServerRequest(object):
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`.HTTPHeaders` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
.. versionchanged:: 3.1
The list format of ``X-Forwarded-For`` is now supported.
.. attribute:: protocol
The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
maps arguments names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`.RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: query_arguments
Same format as ``arguments``, but contains only arguments extracted
from the query string.
.. versionadded:: 3.2
.. attribute:: body_arguments
Same format as ``arguments``, but contains only arguments extracted
from the request body.
.. versionadded:: 3.2
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of `.HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
.. versionchanged:: 4.0
Moved from ``tornado.httpserver.HTTPRequest``.
"""
def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
body=None, host=None, files=None, connection=None,
start_line=None, server_connection=None):
if start_line is not None:
method, uri, version = start_line
self.method = method
self.uri = uri
self.version = version
self.headers = headers or HTTPHeaders()
self.body = body or b""
# set remote IP and protocol
context = getattr(connection, 'context', None)
self.remote_ip = getattr(context, 'remote_ip', None)
self.protocol = getattr(context, 'protocol', "http")
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.host_name = split_host_and_port(self.host.lower())[0]
self.files = files or {}
self.connection = connection
self.server_connection = server_connection
self._start_time = time.time()
self._finish_time = None
self.path, sep, self.query = uri.partition('?')
self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
self.query_arguments = copy.deepcopy(self.arguments)
self.body_arguments = {}
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics.
.. deprecated:: 4.0
Applications are less likely to need this information with the
introduction of `.HTTPConnection`. If you still need it, access
the ``version`` attribute directly.
"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
parsed = parse_cookie(self.headers["Cookie"])
except Exception:
pass
else:
for k, v in parsed.items():
try:
self._cookies[k] = v
except Exception:
# SimpleCookie imposes some restrictions on keys;
# parse_cookie does not. Discard any cookies
# with disallowed keys.
pass
return self._cookies
def write(self, chunk, callback=None):
"""Writes the given chunk to the response stream.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
assert isinstance(chunk, bytes)
assert self.version.startswith("HTTP/1."), \
"deprecated interface only supported in HTTP/1.x"
self.connection.write(chunk, callback=callback)
def finish(self):
"""Finishes this HTTP request on the open connection.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self, binary_form=False):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer's
`ssl.SSLContext.verify_mode` field must be set, e.g.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain("foo.crt", "foo.key")
ssl_ctx.load_verify_locations("cacerts.pem")
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
server = HTTPServer(app, ssl_options=ssl_ctx)
By default, the return value is a dictionary (or None, if no
client certificate is present). If ``binary_form`` is true, a
DER-encoded form of the certificate is returned instead. See
SSLSocket.getpeercert() in the standard library for more
details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert(
binary_form=binary_form)
except SSLError:
return None
def _parse_body(self):
parse_body_arguments(
self.headers.get("Content-Type", ""), self.body,
self.body_arguments, self.files,
self.headers)
for k, v in self.body_arguments.items():
self.arguments.setdefault(k, []).extend(v)
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
class HTTPInputError(Exception):
"""Exception class for malformed HTTP requests or responses
from remote sources.
.. versionadded:: 4.0
"""
pass
class HTTPOutputError(Exception):
"""Exception class for errors in HTTP output.
.. versionadded:: 4.0
"""
pass
class HTTPServerConnectionDelegate(object):
"""Implement this interface to handle requests from `.HTTPServer`.
.. versionadded:: 4.0
"""
def start_request(self, server_conn, request_conn):
"""This method is called by the server when a new request has started.
:arg server_conn: is an opaque object representing the long-lived
(e.g. tcp-level) connection.
:arg request_conn: is a `.HTTPConnection` object for a single
request/response exchange.
This method should return a `.HTTPMessageDelegate`.
"""
raise NotImplementedError()
def on_close(self, server_conn):
"""This method is called when a connection has been closed.
:arg server_conn: is a server connection that has previously been
passed to ``start_request``.
"""
pass
class HTTPMessageDelegate(object):
"""Implement this interface to handle an HTTP request or response.
.. versionadded:: 4.0
"""
def headers_received(self, start_line, headers):
"""Called when the HTTP headers have been received and parsed.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
depending on whether this is a client or server message.
:arg headers: a `.HTTPHeaders` instance.
Some `.HTTPConnection` methods can only be called during
``headers_received``.
May return a `.Future`; if it does the body will not be read
until it is done.
"""
pass
def data_received(self, chunk):
"""Called when a chunk of data has been received.
May return a `.Future` for flow control.
"""
pass
def finish(self):
"""Called after the last chunk of data has been received."""
pass
def on_connection_close(self):
"""Called if the connection is closed without finishing the request.
If ``headers_received`` is called, either ``finish`` or
``on_connection_close`` will be called, but not both.
"""
pass
class HTTPConnection(object):
"""Applications use this interface to write their responses.
.. versionadded:: 4.0
"""
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Write an HTTP header block.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
:arg headers: a `.HTTPHeaders` instance.
:arg chunk: the first (optional) chunk of data. This is an optimization
so that small responses can be written in the same call as their
headers.
:arg callback: a callback to be run when the write is complete.
The ``version`` field of ``start_line`` is ignored.
Returns a `.Future` if no callback is given.
"""
raise NotImplementedError()
def write(self, chunk, callback=None):
"""Writes a chunk of body data.
The callback will be run when the write is complete. If no callback
is given, returns a Future.
"""
raise NotImplementedError()
def finish(self):
"""Indicates that the last body data has been written.
"""
raise NotImplementedError()
def url_concat(url, args):
"""Concatenate url and arguments regardless of whether
url has existing query parameters.
``args`` may be either a dictionary or a list of key-value pairs
(the latter allows for multiple values with the same key).
>>> url_concat("http://example.com/foo", dict(c="d"))
'http://example.com/foo?c=d'
>>> url_concat("http://example.com/foo?a=b", dict(c="d"))
'http://example.com/foo?a=b&c=d'
>>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
'http://example.com/foo?a=b&c=d&c=d2'
"""
if args is None:
return url
parsed_url = urlparse(url)
if isinstance(args, dict):
parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
parsed_query.extend(args.items())
elif isinstance(args, list) or isinstance(args, tuple):
parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
parsed_query.extend(args)
else:
err = "'args' parameter should be dict, list or tuple. Not {0}".format(
type(args))
raise TypeError(err)
final_query = urlencode(parsed_query)
url = urlunparse((
parsed_url[0],
parsed_url[1],
parsed_url[2],
parsed_url[3],
final_query,
parsed_url[5]))
return url
class HTTPFile(ObjectDict):
"""Represents a file uploaded via a form.
For backwards compatibility, its instance attributes are also
accessible as dictionary keys.
* ``filename``
* ``body``
* ``content_type``
"""
pass
def _parse_request_range(range_header):
"""Parses a Range header.
    Returns either ``None`` or a ``(start, end)`` tuple.
Note that while the HTTP headers use inclusive byte positions,
this method returns indexes suitable for use in slices.
>>> start, end = _parse_request_range("bytes=1-2")
>>> start, end
(1, 3)
>>> [0, 1, 2, 3, 4][start:end]
[1, 2]
>>> _parse_request_range("bytes=6-")
(6, None)
>>> _parse_request_range("bytes=-6")
(-6, None)
>>> _parse_request_range("bytes=-0")
(None, 0)
>>> _parse_request_range("bytes=")
(None, None)
>>> _parse_request_range("foo=42")
>>> _parse_request_range("bytes=1-2,6-10")
    Note: only supports one range (e.g., ``bytes=1-2,6-10`` is not allowed).
See [0] for the details of the range header.
[0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
"""
unit, _, value = range_header.partition("=")
unit, value = unit.strip(), value.strip()
if unit != "bytes":
return None
start_b, _, end_b = value.partition("-")
try:
start = _int_or_none(start_b)
end = _int_or_none(end_b)
except ValueError:
return None
if end is not None:
if start is None:
if end != 0:
start = -end
end = None
else:
end += 1
return (start, end)
def _get_content_range(start, end, total):
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return "bytes %s-%s/%s" % (start, end, total)
def _int_or_none(val):
val = val.strip()
if val == "":
return None
return int(val)
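# Illustrative sketch (not part of Tornado): combining the two helpers above
# to serve a Range request against an in-memory payload. The function name
# is an assumption for demonstration.
def _example_serve_range(range_header, payload):
    total = len(payload)
    parsed = _parse_request_range(range_header)
    if parsed is None:
        return payload, None  # malformed or non-byte range: serve full body
    start, end = parsed
    if start is not None and start < 0:
        start += total  # normalize a suffix range such as "bytes=-6"
    # The parsed pair is already slice-style, so it can index the payload
    # directly; _get_content_range expects the same slice-style pair.
    return payload[start:end], _get_content_range(start, end, total)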
def parse_body_arguments(content_type, body, arguments, files, headers=None):
"""Parses a form request body.
Supports ``application/x-www-form-urlencoded`` and
``multipart/form-data``. The ``content_type`` parameter should be
a string and ``body`` should be a byte string. The ``arguments``
and ``files`` parameters are dictionaries that will be updated
with the parsed contents.
"""
if headers and 'Content-Encoding' in headers:
gen_log.warning("Unsupported Content-Encoding: %s",
headers['Content-Encoding'])
return
if content_type.startswith("application/x-www-form-urlencoded"):
try:
uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
except Exception as e:
gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
uri_arguments = {}
for name, values in uri_arguments.items():
if values:
arguments.setdefault(name, []).extend(values)
elif content_type.startswith("multipart/form-data"):
try:
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
parse_multipart_form_data(utf8(v), body, arguments, files)
break
else:
raise ValueError("multipart boundary not found")
except Exception as e:
gen_log.warning("Invalid multipart/form-data: %s", e)
def parse_multipart_form_data(boundary, data, arguments, files):
"""Parses a ``multipart/form-data`` body.
The ``boundary`` and ``data`` parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b'"') and boundary.endswith(b'"'):
boundary = boundary[1:-1]
final_boundary_index = data.rfind(b"--" + boundary + b"--")
if final_boundary_index == -1:
gen_log.warning("Invalid multipart/form-data: no final boundary")
return
parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
for part in parts:
if not part:
continue
eoh = part.find(b"\r\n\r\n")
if eoh == -1:
gen_log.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b"\r\n"):
gen_log.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
gen_log.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(HTTPFile( # type: ignore
filename=disp_params["filename"], body=value,
content_type=ctype))
else:
arguments.setdefault(name, []).append(value)
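# Illustrative sketch (not part of Tornado): building a tiny
# multipart/form-data body by hand and parsing it with the helper above.
# The function name is an assumption for demonstration.
def _example_parse_multipart():
    boundary = b"1234567890"
    body = (b"--1234567890\r\n"
            b'Content-Disposition: form-data; name="field"\r\n'
            b"\r\n"
            b"value\r\n"
            b"--1234567890--\r\n")
    arguments, files = {}, {}
    parse_multipart_form_data(boundary, body, arguments, files)
    return arguments  # -> {'field': [b'value']}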
def format_timestamp(ts):
"""Formats a timestamp in the format used by HTTP.
The argument may be a numeric timestamp as returned by `time.time`,
a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
object.
>>> format_timestamp(1359312200)
'Sun, 27 Jan 2013 18:43:20 GMT'
"""
if isinstance(ts, numbers.Real):
pass
elif isinstance(ts, (tuple, time.struct_time)):
ts = calendar.timegm(ts)
elif isinstance(ts, datetime.datetime):
ts = calendar.timegm(ts.utctimetuple())
else:
raise TypeError("unknown timestamp type: %r" % ts)
return email.utils.formatdate(ts, usegmt=True)
RequestStartLine = collections.namedtuple(
'RequestStartLine', ['method', 'path', 'version'])
def parse_request_start_line(line):
"""Returns a (method, path, version) tuple for an HTTP 1.x request line.
    The return value is a `collections.namedtuple`.
>>> parse_request_start_line("GET /foo HTTP/1.1")
RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
"""
try:
method, path, version = line.split(" ")
except ValueError:
raise HTTPInputError("Malformed HTTP request line")
if not re.match(r"^HTTP/1\.[0-9]$", version):
raise HTTPInputError(
"Malformed HTTP version in HTTP Request-Line: %r" % version)
return RequestStartLine(method, path, version)
ResponseStartLine = collections.namedtuple(
'ResponseStartLine', ['version', 'code', 'reason'])
def parse_response_start_line(line):
"""Returns a (version, code, reason) tuple for an HTTP 1.x response line.
    The return value is a `collections.namedtuple`.
>>> parse_response_start_line("HTTP/1.1 200 OK")
ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
"""
line = native_str(line)
    match = re.match(r"(HTTP/1\.[0-9]) ([0-9]+) ([^\r]*)", line)
if not match:
raise HTTPInputError("Error parsing response start line")
return ResponseStartLine(match.group(1), int(match.group(2)),
match.group(3))
# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
# It has also been modified to support valueless parameters as seen in
# websocket extension negotiations.
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def _parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
else:
pdict[p] = None
return key, pdict
def _encode_header(key, pdict):
"""Inverse of _parse_header.
>>> _encode_header('permessage-deflate',
... {'client_max_window_bits': 15, 'client_no_context_takeover': None})
'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
"""
if not pdict:
return key
out = [key]
# Sort the parameters just to make it easy to test.
for k, v in sorted(pdict.items()):
if v is None:
out.append(k)
else:
# TODO: quote if necessary.
out.append('%s=%s' % (k, v))
return '; '.join(out)
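# Illustrative sketch (not part of Tornado): _parse_header and
# _encode_header are loose inverses for simple parameter lists.
def _example_header_roundtrip():
    key, pdict = _parse_header('form-data; name="field"; dummy')
    # key == 'form-data', pdict == {'name': 'field', 'dummy': None}
    return _encode_header(key, pdict)  # -> 'form-data; dummy; name=field'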
def doctests():
import doctest
return doctest.DocTestSuite()
def split_host_and_port(netloc):
"""Returns ``(host, port)`` tuple from ``netloc``.
Returned ``port`` will be ``None`` if not present.
.. versionadded:: 4.1
"""
match = re.match(r'^(.+):(\d+)$', netloc)
if match:
host = match.group(1)
port = int(match.group(2))
else:
host = netloc
port = None
return (host, port)
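# Illustrative examples for split_host_and_port (not part of Tornado):
#     split_host_and_port("example.com:8080")  ->  ("example.com", 8080)
#     split_host_and_port("example.com")       ->  ("example.com", None)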
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
_nulljoin = ''.join
def _unquote_cookie(str):
"""Handle double quotes and escaping in cookie values.
This method is copied verbatim from the Python 3.5 standard
library (http.cookies._unquote) so we don't have to depend on
non-public interfaces.
"""
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if str is None or len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k + 1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j + 1:j + 4], 8)))
i = j + 4
return _nulljoin(res)
def parse_cookie(cookie):
"""Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
This function attempts to mimic browser cookie parsing behavior;
it specifically does not follow any of the cookie-related RFCs
(because browsers don't either).
The algorithm used is identical to that used by Django version 1.9.10.
.. versionadded:: 4.4.2
"""
cookiedict = {}
for chunk in cookie.split(str(';')):
if str('=') in chunk:
key, val = chunk.split(str('='), 1)
else:
# Assume an empty name per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
key, val = str(''), chunk
key, val = key.strip(), val.strip()
if key or val:
# unquote using Python's algorithm.
cookiedict[key] = _unquote_cookie(val)
return cookiedict
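# Illustrative sketch (not part of Tornado): parse_cookie handles quoted and
# backslash-escaped values through _unquote_cookie above. The function name
# is an assumption for demonstration.
def _example_parse_cookie():
    return parse_cookie('a=b; c="d\\"e"; =bare')
    # -> {'a': 'b', 'c': 'd"e', '': 'bare'}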
| BugsInPy/BugsInPy/temp/projects/tornado/bug-9-fixed/tornado/tornado/httputil.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-9-buggy/tornado/tornado/httputil.py |
tornado-bug-10 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from tornado.concurrent import Future
from tornado import escape
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing # noqa
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self._headers = None # type: httputil.HTTPHeaders
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d" % status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value):
# type: (_HeaderTypes) -> str
# Convert the input value to a str. This type check is a bit
# subtle: The bytes case only executes on python 3, and the
# unicode case only executes on python 2, because the other
# cases are covered by the first match for str.
if isinstance(value, str):
retval = value
elif isinstance(value, bytes): # py3
# Non-ascii characters in headers are not well supported,
# but if you pass bytes, use latin1 so they pass through as-is.
retval = value.decode('latin1')
elif isinstance(value, unicode_type): # py2
# TODO: This is inconsistent with the use of latin1 above,
# but it's been that way for a long time. Should it change?
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
raise ValueError("Unsafe header value %r", retval)
return retval
_ARG_DEFAULT = object()
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
        # `get_argument`).
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
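    # Illustrative usage (not part of Tornado): extra keyword arguments map
    # onto Morsel attributes, for example:
    #     handler.set_cookie("session", session_id, httponly=True,
    #                        secure=True, max_age=3600)
    # where ``session_id`` is a hypothetical value supplied by the caller.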
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies).
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(secret, name, value, version=version,
key_version=key_version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
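    # Illustrative usage (not part of Tornado): a value written with
    # set_secure_cookie round-trips through get_secure_cookie as bytes:
    #     handler.set_secure_cookie("user", "alice")
    #     handler.get_secure_cookie("user")  # -> b"alice" on a later request
    # ("user" and "alice" are hypothetical example values.)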
def get_secure_cookie_key_version(self, name, value=None):
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return get_signature_key_version(value)
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
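    # Illustrative usage (not part of Tornado): writing a dict switches the
    # response to JSON automatically, for example:
    #     handler.write({"status": "ok"})
    #     # -> body b'{"status": "ok"}' with an application/json Content-Type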
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
if js_files:
# Maintain order of JavaScript files given by modules
js = self.render_linked_js(js_files)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = self.render_embed_js(js_embed)
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
css = self.render_linked_css(css_files)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = self.render_embed_css(css_embed)
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
def render_embed_js(self, js_embed):
"""Default method used to render the final embedded js for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
def render_linked_css(self, css_files):
"""Default method used to render the final css links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
def render_embed_css(self, css_embed):
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` and ``template_whitespace`` application
settings. If a ``template_loader`` application setting is
supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers,
chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine('',
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code in (204, 304):
assert not self._write_buffer, "Cannot send body with %s" % self._status_code
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response",
exc_info=True)
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
        .. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is set in one of two ways:
* A subclass may override `get_current_user()`, which will be called
automatically the first time ``self.current_user`` is accessed.
`get_current_user()` will only be called once per request,
and is cached for future access::
def get_current_user(self):
user_cookie = self.get_secure_cookie("user")
if user_cookie:
return json.loads(user_cookie)
return None
* It may be set as a normal variable, typically from an overridden
`prepare()`::
@gen.coroutine
def prepare(self):
user_id_cookie = self.get_secure_cookie("user_id")
if user_id_cookie:
self.current_user = yield load_user(user_id_cookie)
Note that `prepare()` may be a coroutine while `get_current_user()`
may not, so the latter form is necessary if loading the user requires
asynchronous operations.
The user object may be any type of the application's choosing.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie.
This method may not be a coroutine.
"""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
           The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
.. versionchanged:: 4.3
The ``xsrf_cookie_kwargs`` `Application` setting may be
used to supply additional cookie options (which will be
passed directly to `set_cookie`). For example,
``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
will set the ``secure`` and ``httponly`` flags on the
``_xsrf`` cookie.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days,
**cookie_kwargs)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
try:
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
raise Exception("Unknown xsrf cookie version")
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
except Exception:
# Catch exceptions and return nothing instead of failing.
gen_log.debug("Uncaught exception in _decode_xsrf_token",
exc_info=True)
return None, None, None
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not token:
raise HTTPError(403, "'_xsrf' argument has invalid format")
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
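    # Illustrative usage (not part of Tornado), assuming the application's
    # ``static_path`` setting is configured:
    #     handler.static_url("css/site.css")
    #     # -> "/static/css/site.css?v=<signature>" with the default handler
    # ("css/site.css" is a hypothetical file path.)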
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
def val(x):
return x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if result is not None:
result = yield result
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if result is not None:
result = yield result
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
try:
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish(*e.args)
return
try:
self.log_exception(*sys.exc_info())
except Exception:
# An error here should still get a best-effort send_error()
# to avoid leaking the connection.
app_log.error("Error in exception logger", exc_info=True)
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
        # that are not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
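# Illustrative sketch (not part of tornado): a handler that overrides
# compute_etag with a cheap application-level version string and uses
# check_etag_header in prepare() for an early If-None-Match exit, as the
# check_etag_header docstring above suggests. All names here are hypothetical.
class _ExampleVersionedHandler(RequestHandler):
    def initialize(self, version):
        self._version = version

    def compute_etag(self):
        # Derive the etag from a known version instead of hashing the body.
        return '"%s"' % self._version

    def prepare(self):
        # Setting the Etag early lets us answer 304 before doing any work.
        self.set_etag_header()
        if self.check_etag_header():
            self.set_status(304)
            self.finish()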
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is for callback-style asynchronous methods; for
coroutines, use the ``@gen.coroutine`` decorator without
``@asynchronous``. (It is legal for legacy reasons to use the two
decorators together provided ``@asynchronous`` is first, but
``@asynchronous`` will be ignored in this case)
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example:
.. testcode::
class MyRequestHandler(RequestHandler):
@asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. testoutput::
:hide:
.. versionchanged:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
.. versionchanged:: 4.3 Returning anything but ``None`` or a
yieldable object from a method decorated with ``@asynchronous``
is an error. Such return values were previously ignored silently.
"""
# Delay the IOLoop import because it's not available on app engine.
from tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if result is not None:
result = gen.convert_yielded(result)
                # If @asynchronous is used with @gen.coroutine (but
                # not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``), in which case the next method will not be called
      until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage.
"""
if not issubclass(cls, RequestHandler):
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
cls._stream_request_body = True
return cls
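# Illustrative sketch (not part of tornado): a minimal streaming handler.
# With @stream_request_body, prepare() runs once the headers arrive,
# data_received() runs zero or more times as chunks arrive, and put() runs
# after the whole body has been read. The class name is hypothetical.
@stream_request_body
class _ExampleStreamingUploadHandler(RequestHandler):
    def prepare(self):
        self.bytes_read = 0

    def data_received(self, chunk):
        self.bytes_read += len(chunk)

    def put(self):
        self.write("uploaded %d bytes" % self.bytes_read)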
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/*'`` in conjunction with the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/?'`` in conjunction with the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
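# Illustrative sketch (not part of tornado): pairing the two decorators above
# with the routing regexes their docstrings call for. Wrapped in a helper that
# is never called here, since Application is defined later in this module.
def _example_slash_app():
    class FooHandler(RequestHandler):
        @removeslash
        def get(self):
            self.write("foo")  # /foo/ redirects permanently to /foo

    class BarHandler(RequestHandler):
        @addslash
        def get(self):
            self.write("bar")  # /bar redirects permanently to /bar/

    # r'/foo/*' tolerates trailing slashes; r'/bar/?' tolerates a missing one.
    return Application([
        (r"/foo/*", FooHandler),
        (r"/bar/?", BarHandler),
    ])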
class _ApplicationRouter(ReversibleRuleRouter):
"""Routing implementation used internally by `Application`.
Provides a binding between `Application` and `RequestHandler`.
This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
    * it allows `RequestHandler` subclasses to be used as a `~.routing.Rule` target and
    * it allows a list/tuple of rules to be used as a `~.routing.Rule` target.
    The ``process_rule`` implementation will substitute such a list with an appropriate
    `_ApplicationRouter` instance.
"""
def __init__(self, application, rules=None):
assert isinstance(application, Application)
self.application = application
super(_ApplicationRouter, self).__init__(rules)
def process_rule(self, rule):
rule = super(_ApplicationRouter, self).process_rule(rule)
if isinstance(rule.target, (list, tuple)):
rule.target = _ApplicationRouter(self.application, rule.target)
return rule
def get_target_delegate(self, target, request, **target_params):
if isclass(target) and issubclass(target, RequestHandler):
return self.application.get_handler_delegate(request, target, **target_params)
return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
class Application(ReversibleRouter):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.current().start()
The constructor for this class takes in a list of `~.routing.Rule`
objects or tuples of values corresponding to the arguments of
`~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
the values in square brackets being optional. The default matcher is
`~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
instead of ``(PathMatches(regexp), target)``.
A common routing target is a `RequestHandler` subclass, but you can also
use lists of rules as a target, which create a nested routing configuration::
application = web.Application([
(HostMatches("example.com"), [
(r"/", MainPageHandler),
(r"/feed", FeedHandler),
]),
])
In addition to this you can use nested `~.routing.Router` instances,
`~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
(see `~.routing` module docs for more information).
When we receive requests, we iterate over the list in order and
    instantiate the first request class whose regexp
matches the request path. The request class can be specified as
either a class object or a (fully-qualified) name.
A dictionary may be passed as the third element (``target_kwargs``)
of the tuple, which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions.
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
.. versionchanged:: 4.5
Integration with the new `tornado.routing` module.
"""
def __init__(self, handlers=None, default_host=None, transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
self.wildcard_router = _ApplicationRouter(self, handlers)
self.default_router = _ApplicationRouter(self, [
Rule(AnyMatches(), self.wildcard_router)
])
# Automatically reload modified modules
if self.settings.get('autoreload'):
from tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` to start the server.
Returns the `.HTTPServer` object.
.. versionchanged:: 4.3
Now returns the `.HTTPServer` object.
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
return server
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
host_matcher = HostMatches(host_pattern)
rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
self.default_router.rules.insert(-1, rule)
if self.default_host is not None:
self.wildcard_router.add_rules([(
DefaultHostMatches(self, host_matcher.host_pattern),
host_handlers
)])
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = self.find_handler(request)
return dispatcher.execute()
def find_handler(self, request, **kwargs):
route = self.default_router.find_handler(request)
if route is not None:
return route
if self.settings.get('default_handler_class'):
return self.get_handler_delegate(
request,
self.settings['default_handler_class'],
self.settings.get('default_handler_args', {}))
return self.get_handler_delegate(
request, ErrorHandler, {'status_code': 404})
def get_handler_delegate(self, request, target_class, target_kwargs=None,
path_args=None, path_kwargs=None):
"""Returns `~.httputil.HTTPMessageDelegate` that can serve a request
for application and `RequestHandler` subclass.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg RequestHandler target_class: a `RequestHandler` class.
:arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
:arg list path_args: positional arguments for ``target_class`` HTTP method that
will be executed while handling a request (``get``, ``post`` or any other).
:arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
"""
return _HandlerDelegate(
self, request, target_class, target_kwargs, path_args, path_kwargs)
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
reversed_url = self.default_router.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
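# Illustrative sketch (not part of tornado): a named URLSpec lets reverse_url
# rebuild a path from its capture groups. Wrapped in a helper that is never
# called here; the handler and the route name are hypothetical. URLSpec is
# imported at the top of this module from tornado.routing.
def _example_reverse_url():
    class UserHandler(RequestHandler):
        def get(self, user_id):
            self.write("user %s" % user_id)

    app = Application([
        URLSpec(r"/user/([0-9]+)", UserHandler, name="user"),
    ])
    # Arguments are stringified, utf8-encoded and url-escaped: "/user/42".
    return app.reverse_url("user", 42)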
class _HandlerDelegate(httputil.HTTPMessageDelegate):
def __init__(self, application, request, handler_class, handler_kwargs,
path_args, path_kwargs):
self.application = application
self.connection = request.connection
self.request = request
self.handler_class = handler_class
self.handler_kwargs = handler_kwargs or {}
self.path_args = path_args or []
self.path_kwargs = path_kwargs or {}
self.chunks = []
self.stream_request_body = _has_stream_request_body(self.handler_class)
def headers_received(self, start_line, headers):
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
self.handler._execute(transforms, *self.path_args,
**self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
        to pass in the status line along with ``status_code``. Normally
        determined automatically from ``status_code``, but can be supplied
        explicitly to use a non-standard numeric code.
"""
def __init__(self, status_code=500, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
if log_message and not args:
self.log_message = log_message.replace('%', '%%')
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
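# Illustrative sketch (not part of tornado): the two common ways to construct
# an HTTPError. The item id and the custom reason phrase are hypothetical.
def _example_http_error(found):
    if not found:
        # Logged as "404 <request summary>: no such item: 7"; the message is
        # not shown to the client unless the application runs in debug mode.
        raise HTTPError(404, "no such item: %s", 7)
    # A non-standard numeric code needs an explicit reason phrase:
    raise HTTPError(499, reason="client closed request")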
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will
end (calling `RequestHandler.finish` if it hasn't already been
called), but the error-handling methods (including
`RequestHandler.write_error`) will not be called.
If `Finish()` was created with no arguments, the pending response
will be sent as-is. If `Finish()` was given an argument, that
argument will be passed to `RequestHandler.finish()`.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
.. versionchanged:: 4.3
Arguments passed to ``Finish()`` will be passed on to
`RequestHandler.finish`.
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
`RedirectHandler` supports regular expression substitutions. E.g., to
swap the first and second parts of a path while preserving the remainder::
application = web.Application([
(r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
])
The final URL is formatted with `str.format` and the substrings that match
the capturing groups. In the above example, a request to "/a/b/c" would be
formatted like::
str.format("/{1}/{0}/{2}", "a", "b", "c") # -> "/b/a/c"
Use Python's :ref:`format string syntax <formatstrings>` to customize how
values are substituted.
.. versionchanged:: 4.5
Added support for substitutions into the destination URL.
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self, *args):
self.redirect(self._url.format(*args), permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
    class method. Instance methods may use the attributes ``self.path``,
    ``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: typing.Dict
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
                # As per RFC 2616 14.35.1, a range is not satisfiable only if
                # the first requested byte is equal to or greater than the
                # content length, or when a suffix with length 0 is specified.
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified,
content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
        ``root`` is the configured path for the `StaticFileHandler`,
        and ``absolute_path`` is the result of `get_absolute_path`.
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
# os.path.abspath strips a trailing /.
# We must add it back to `root` so that we only match files
# in a directory named `root` instead of files starting with
# that prefix.
root = os.path.abspath(root)
if not root.endswith(os.path.sep):
# abspath always removes a trailing slash, except when
# root is '/'. This is an unusual case, but several projects
# have independently discovered this technique to disable
# Tornado's path validation and (hopefully) do their own,
# so we need to support it.
root += os.path.sep
# The trailing slash also needs to be temporarily added back
# the requested path so a request to root/ will match.
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
        ``static_url_prefix`` removed. The return value should be a
        filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
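# Illustrative sketch (not part of tornado): a StaticFileHandler subclass that
# touches only the hooks the "Subclassing notes" above recommend overriding.
# The class name and header values are hypothetical.
class _ExampleCachedStaticHandler(StaticFileHandler):
    def get_cache_time(self, path, modified, mime_type):
        # Keep the 10-year expiry for versioned urls, but cache unversioned
        # requests for an hour instead of not at all.
        if "v" in self.request.arguments:
            return self.CACHE_MAX_AGE
        return 3600

    def set_extra_headers(self, path):
        self.set_header("X-Frame-Options", "DENY")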
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
        of just a whitelist. (The whitelist is still used for certain
        non-text mime types.)
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml",
"image/svg+xml"])
# Python's GzipFile defaults to level 9, while most other gzip
# tools (including gzip itself) default to 6, which is probably a
# better CPU/size tradeoff.
GZIP_LEVEL = 6
# Responses that are too short are unlikely to benefit from gzipping
# after considering the "Content-Encoding: gzip" header and the header
# inside the gzip encoding.
# Note that responses written in multiple chunks will be compressed
# regardless of size.
MIN_LENGTH = 1024
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
# TODO: can/should this type be inherited from the superclass?
if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding'
else:
headers['Vary'] = 'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
compresslevel=self.GZIP_LEVEL)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
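# Illustrative sketch (not part of tornado): GZipContentEncoding is installed
# automatically when the compress_response setting is true, so applications
# normally never construct it directly. Wrapped in a helper that is never
# called here; the handler is hypothetical.
def _example_compressed_app():
    class BigHandler(RequestHandler):
        def get(self):
            self.set_header("Content-Type", "text/plain")
            self.write("x" * 4096)  # long enough to exceed MIN_LENGTH

    return Application([(r"/big", BigHandler)], compress_response=True)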
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
    will add a ``next`` parameter so the login page knows where to send
you once you're logged in.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
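# Illustrative sketch (not part of tornado): @authenticated redirects browser
# requests to the configured login url whenever get_current_user returns a
# falsy value. The cookie name and handler are hypothetical; the login url
# comes from the application's ``login_url`` setting.
class _ExampleProfileHandler(RequestHandler):
    def get_current_user(self):
        return self.get_secure_cookie("user")

    @authenticated
    def get(self):
        self.write(b"hello " + self.current_user)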
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
Subclasses of UIModule must override the `render` method.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Override in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Override to return a JavaScript string
to be embedded in the page."""
return None
def javascript_files(self):
"""Override to return a list of JavaScript files needed by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def embedded_css(self):
"""Override to return a CSS string
that will be embedded in the page."""
return None
def css_files(self):
"""Override to returns a list of CSS files required by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def html_head(self):
"""Override to return an HTML string that will be put in the <head/>
element.
"""
return None
def html_body(self):
"""Override to return an HTML string that will be put at the end of
the <body/> element.
"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
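# Illustrative sketch (not part of tornado): a minimal UIModule that renders a
# template and contributes a script file, both of which the framework inserts
# into the rendered page automatically. The template and script paths are
# hypothetical.
class _ExampleEntryModule(UIModule):
    def render(self, entry):
        return self.render_string("module-entry.html", entry=entry)

    def javascript_files(self):
        # Relative paths are resolved through RequestHandler.static_url.
        return ["js/entry.js"]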
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
key_version=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (integer, default is 0)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
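        # Worked example (illustrative, not from the source): signing
        # name "foo", value "bar" (base64 "YmFy") at timestamp
        # 1300000000 with key version 0 signs the string
        #   2|1:0|10:1300000000|3:foo|4:YmFy|
        # and appends the 64-character hex HMAC-SHA256 signature.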
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2",
format_field(str(key_version or 0)),
format_field(timestamp),
format_field(name),
format_field(value),
b''])
if isinstance(secret, dict):
assert key_version is not None, 'Key version must be set when sign key dict is used'
assert version >= 2, 'Version must be at least 2 for key version support'
secret = secret[key_version]
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
    # Figures out what version the value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
value = utf8(value)
version = _get_version(value)
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value,
max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value,
max_age_days, clock)
else:
return None
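# Illustrative sketch (not part of tornado): a v2 sign/verify round trip using
# the helpers above, as RequestHandler's secure-cookie support does. The
# secret and cookie name are hypothetical.
def _example_signed_value_roundtrip():
    secret = "change-me"  # hypothetical signing secret
    signed = create_signed_value(secret, "user", "alice", version=2)
    # Returns b"alice" while the signature is valid and fresh, else None.
    return decode_signed_value(secret, "user", signed, max_age_days=31)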
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r",
value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
try:
key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
except ValueError:
return None
signed_string = value[:-len(passed_sig)]
if isinstance(secret, dict):
try:
secret = secret[key_version]
except KeyError:
return None
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def get_signature_key_version(value):
value = utf8(value)
version = _get_version(value)
if version < 2:
return None
try:
key_version, _, _, _, _ = _decode_fields_v2(value)
except ValueError:
return None
return key_version
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
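# Illustrative sketch (not part of the original module): is_absolute only
# checks for a leading "/" or an explicit http(s) scheme, so
# protocol-relative "//host/path" URLs also count as absolute.
def _demo_is_absolute():
    assert is_absolute("/static/app.js")
    assert is_absolute("https://example.com/app.js")
    assert not is_absolute("app.js")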
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from tornado.concurrent import Future
from tornado import escape
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing # noqa
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self._headers = None # type: httputil.HTTPHeaders
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d" % status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
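    # Illustrative usage sketch (comments only, not part of the original
    # module), showing how the three header methods above interact:
    #     self.set_header("X-Frame-Options", "DENY")  # replaces any prior value
    #     self.add_header("Set-Cookie", "a=1")        # repeatable: multiple values
    #     self.add_header("Set-Cookie", "b=2")
    #     self.clear_header("X-Frame-Options")        # undoes set_header only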
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value):
# type: (_HeaderTypes) -> str
# Convert the input value to a str. This type check is a bit
# subtle: The bytes case only executes on python 3, and the
# unicode case only executes on python 2, because the other
# cases are covered by the first match for str.
if isinstance(value, str):
retval = value
elif isinstance(value, bytes): # py3
# Non-ascii characters in headers are not well supported,
# but if you pass bytes, use latin1 so they pass through as-is.
retval = value.decode('latin1')
elif isinstance(value, unicode_type): # py2
# TODO: This is inconsistent with the use of latin1 above,
# but it's been that way for a long time. Should it change?
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
raise ValueError("Unsafe header value %r", retval)
return retval
_ARG_DEFAULT = object()
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
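    # Illustrative usage sketch (comments only, not part of the original
    # module): without a default the argument is required, and a missing
    # value raises MissingArgumentError (an HTTPError with status 400):
    #     q = self.get_argument("q")             # required; may raise
    #     page = self.get_argument("page", "1")  # optional, with default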
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
        # `get_argument`).
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
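    # Illustrative usage sketch (comments only, not part of the original
    # module): extra keyword arguments become Morsel attributes, with the
    # max_age/httponly/secure special cases handled in the loop above:
    #     self.set_cookie("theme", "dark", expires_days=30,
    #                     httponly=True, secure=True, max_age=3600)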
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
        Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies).
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(secret, name, value, version=version,
key_version=key_version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
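    # Illustrative usage sketch (comments only, not part of the original
    # module): a signed round trip through the two methods above:
    #     self.set_secure_cookie("user", "alice")
    #     user = self.get_secure_cookie("user")  # b"alice", or None if invalid/expired
    # For key rotation, the cookie_secret setting may be a dict of
    # {key_version: secret} together with the key_version setting
    # (see create_signed_value above).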
def get_secure_cookie_key_version(self, name, value=None):
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return get_signature_key_version(value)
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
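    # Illustrative usage sketch (comments only, not part of the original
    # module): writing a dict switches the response to JSON:
    #     self.write({"status": "ok", "items": [1, 2, 3]})
    #     # -> Content-Type: application/json; charset=UTF-8
    # A top-level list must be wrapped in a dict, per the note above.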
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
if js_files:
# Maintain order of JavaScript files given by modules
js = self.render_linked_js(js_files)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = self.render_embed_js(js_embed)
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
css = self.render_linked_css(css_files)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = self.render_embed_css(css_embed)
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
def render_embed_js(self, js_embed):
"""Default method used to render the final embedded js for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
def render_linked_css(self, css_files):
"""Default method used to render the final css links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
def render_embed_css(self, css_embed):
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` and ``template_whitespace`` application
settings. If a ``template_loader`` application setting is
supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers,
chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine('',
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code in (204, 304):
assert not self._write_buffer, "Cannot send body with %s" % self._status_code
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
def _break_cycles(self):
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response",
exc_info=True)
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
        .. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is set in one of two ways:
* A subclass may override `get_current_user()`, which will be called
automatically the first time ``self.current_user`` is accessed.
`get_current_user()` will only be called once per request,
and is cached for future access::
def get_current_user(self):
user_cookie = self.get_secure_cookie("user")
if user_cookie:
return json.loads(user_cookie)
return None
* It may be set as a normal variable, typically from an overridden
`prepare()`::
@gen.coroutine
def prepare(self):
user_id_cookie = self.get_secure_cookie("user_id")
if user_id_cookie:
self.current_user = yield load_user(user_id_cookie)
Note that `prepare()` may be a coroutine while `get_current_user()`
may not, so the latter form is necessary if loading the user requires
asynchronous operations.
The user object may be any type of the application's choosing.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie.
This method may not be a coroutine.
"""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
           The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
.. versionchanged:: 4.3
The ``xsrf_cookie_kwargs`` `Application` setting may be
used to supply additional cookie options (which will be
passed directly to `set_cookie`). For example,
``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
will set the ``secure`` and ``httponly`` flags on the
``_xsrf`` cookie.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days,
**cookie_kwargs)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
try:
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
raise Exception("Unknown xsrf cookie version")
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
except Exception:
# Catch exceptions and return nothing instead of failing.
gen_log.debug("Uncaught exception in _decode_xsrf_token",
exc_info=True)
return None, None, None
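    # Illustrative sketch (comments only, not part of the original module):
    # a version 2 cookie is "2|<hex mask>|<hex masked token>|<timestamp>",
    # and unmasking works because XOR masking is its own inverse:
    #     mask = os.urandom(4)
    #     token = os.urandom(16)
    #     assert _websocket_mask(mask, _websocket_mask(mask, token)) == token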
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not token:
raise HTTPError(403, "'_xsrf' argument has invalid format")
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
def val(x):
return x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
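    # Illustrative sketch (comments only, not part of the original module):
    # with the weak comparison above, W/"abc" and "abc" match each other,
    # and If-None-Match: * matches any computed Etag:
    #     If-None-Match: W/"abc"  vs  Etag: "abc"  -> match (304)
    #     If-None-Match: "abc"    vs  Etag: "xyz"  -> no match (200)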
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if result is not None:
result = yield result
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if result is not None:
result = yield result
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
try:
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish(*e.args)
return
try:
self.log_exception(*sys.exc_info())
except Exception:
# An error here should still get a best-effort send_error()
# to avoid leaking the connection.
app_log.error("Error in exception logger", exc_info=True)
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is for callback-style asynchronous methods; for
coroutines, use the ``@gen.coroutine`` decorator without
``@asynchronous``. (It is legal for legacy reasons to use the two
decorators together provided ``@asynchronous`` is first, but
``@asynchronous`` will be ignored in this case)
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example:
.. testcode::
class MyRequestHandler(RequestHandler):
@asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. testoutput::
:hide:
.. versionchanged:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
.. versionchanged:: 4.3 Returning anything but ``None`` or a
yieldable object from a method decorated with ``@asynchronous``
is an error. Such return values were previously ignored silently.
"""
# Delay the IOLoop import because it's not available on app engine.
from tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if result is not None:
result = gen.convert_yielded(result)
# If @asynchronous is used with @gen.coroutine, (but
# not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``), in which case the next method will not be called
      until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage.
"""
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True
return cls
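# Illustrative sketch (not part of the original module; _DemoUploadHandler is
# a hypothetical name), showing the contract described above: data_received
# accumulates chunks as they arrive and post() runs once the body is done.
@stream_request_body
class _DemoUploadHandler(RequestHandler):
    def prepare(self):
        self.chunks = []
    def data_received(self, chunk):
        self.chunks.append(chunk)
    def post(self):
        self.write("received %d bytes" % sum(len(c) for c in self.chunks))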
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
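# Illustrative sketch (not part of the original module; _DemoDirHandler is a
# hypothetical name), pairing addslash with the optional-slash pattern its
# docstring suggests, e.g. mapped as (r"/dir/?", _DemoDirHandler).
class _DemoDirHandler(RequestHandler):
    @addslash
    def get(self):
        self.write("directory listing")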
class _ApplicationRouter(ReversibleRuleRouter):
"""Routing implementation used internally by `Application`.
Provides a binding between `Application` and `RequestHandler`.
This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
    * it allows `RequestHandler` subclasses to be used as a `~.routing.Rule` target and
    * it allows a list/tuple of rules to be used as a `~.routing.Rule` target.
    The ``process_rule`` implementation substitutes such a list with an
    appropriate `_ApplicationRouter` instance.
"""
def __init__(self, application, rules=None):
assert isinstance(application, Application)
self.application = application
super(_ApplicationRouter, self).__init__(rules)
def process_rule(self, rule):
rule = super(_ApplicationRouter, self).process_rule(rule)
if isinstance(rule.target, (list, tuple)):
rule.target = _ApplicationRouter(self.application, rule.target)
return rule
def get_target_delegate(self, target, request, **target_params):
if isclass(target) and issubclass(target, RequestHandler):
return self.application.get_handler_delegate(request, target, **target_params)
return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
class Application(ReversibleRouter):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.current().start()
The constructor for this class takes in a list of `~.routing.Rule`
objects or tuples of values corresponding to the arguments of
`~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
the values in square brackets being optional. The default matcher is
`~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
instead of ``(PathMatches(regexp), target)``.
A common routing target is a `RequestHandler` subclass, but you can also
use lists of rules as a target, which create a nested routing configuration::
application = web.Application([
(HostMatches("example.com"), [
(r"/", MainPageHandler),
(r"/feed", FeedHandler),
]),
])
In addition to this you can use nested `~.routing.Router` instances,
`~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
(see `~.routing` module docs for more information).
When we receive requests, we iterate over the list in order and
    instantiate the first request class whose regexp
matches the request path. The request class can be specified as
either a class object or a (fully-qualified) name.
A dictionary may be passed as the third element (``target_kwargs``)
of the tuple, which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions.
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
.. versionchanged:: 4.5
Integration with the new `tornado.routing` module.
"""
def __init__(self, handlers=None, default_host=None, transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
self.wildcard_router = _ApplicationRouter(self, handlers)
self.default_router = _ApplicationRouter(self, [
Rule(AnyMatches(), self.wildcard_router)
])
# Automatically reload modified modules
if self.settings.get('autoreload'):
from tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` to start the server.
Returns the `.HTTPServer` object.
.. versionchanged:: 4.3
Now returns the `.HTTPServer` object.
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
return server
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
host_matcher = HostMatches(host_pattern)
rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
self.default_router.rules.insert(-1, rule)
if self.default_host is not None:
self.wildcard_router.add_rules([(
DefaultHostMatches(self, host_matcher.host_pattern),
host_handlers
)])
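    # Illustrative sketch (hypothetical host and handlers): virtual hosting
    # via add_handlers. Host-specific rules are consulted before the
    # wildcard handlers passed to the constructor.
    #
    #     app = Application([(r"/", MainHandler)])
    #     app.add_handlers(r"api\.example\.com", [
    #         (r"/v1/users/([0-9]+)", UserHandler),
    #     ])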
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = self.find_handler(request)
return dispatcher.execute()
def find_handler(self, request, **kwargs):
route = self.default_router.find_handler(request)
if route is not None:
return route
if self.settings.get('default_handler_class'):
return self.get_handler_delegate(
request,
self.settings['default_handler_class'],
self.settings.get('default_handler_args', {}))
return self.get_handler_delegate(
request, ErrorHandler, {'status_code': 404})
def get_handler_delegate(self, request, target_class, target_kwargs=None,
path_args=None, path_kwargs=None):
"""Returns `~.httputil.HTTPMessageDelegate` that can serve a request
for application and `RequestHandler` subclass.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg RequestHandler target_class: a `RequestHandler` class.
:arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
:arg list path_args: positional arguments for ``target_class`` HTTP method that
will be executed while handling a request (``get``, ``post`` or any other).
:arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
"""
return _HandlerDelegate(
self, request, target_class, target_kwargs, path_args, path_kwargs)
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
reversed_url = self.default_router.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
raise KeyError("%s not found in named urls" % name)
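    # Illustrative sketch (hypothetical names): reverse_url requires the
    # target to be registered under a name, e.g. with `tornado.web.url`.
    #
    #     app = Application([
    #         url(r"/article/([0-9]+)", ArticleHandler, name="article"),
    #     ])
    #     app.reverse_url("article", 42)  # -> "/article/42"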
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
def __init__(self, application, request, handler_class, handler_kwargs,
path_args, path_kwargs):
self.application = application
self.connection = request.connection
self.request = request
self.handler_class = handler_class
self.handler_kwargs = handler_kwargs or {}
self.path_args = path_args or []
self.path_kwargs = path_kwargs or {}
self.chunks = []
self.stream_request_body = _has_stream_request_body(self.handler_class)
def headers_received(self, start_line, headers):
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
self.handler._execute(transforms, *self.path_args,
**self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
        determined automatically from ``status_code``, but must be supplied
        when using a non-standard numeric code.
"""
def __init__(self, status_code=500, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
if log_message and not args:
self.log_message = log_message.replace('%', '%%')
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
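# Illustrative sketch (hypothetical handler and lookup): the extra
# positional arguments fill the %s-style placeholders in log_message; the
# formatted message is logged, not shown to the user outside debug mode.
class _ExampleItemHandler(RequestHandler):
    def get(self, item_id):
        item = None  # stand-in for a real database lookup
        if item is None:
            raise HTTPError(404, "item %s not found", item_id)
        self.write(item)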
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will
end (calling `RequestHandler.finish` if it hasn't already been
called), but the error-handling methods (including
`RequestHandler.write_error`) will not be called.
If `Finish()` was created with no arguments, the pending response
will be sent as-is. If `Finish()` was given an argument, that
argument will be passed to `RequestHandler.finish()`.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
.. versionchanged:: 4.3
Arguments passed to ``Finish()`` will be passed on to
`RequestHandler.finish`.
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
`RedirectHandler` supports regular expression substitutions. E.g., to
swap the first and second parts of a path while preserving the remainder::
application = web.Application([
(r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
])
The final URL is formatted with `str.format` and the substrings that match
the capturing groups. In the above example, a request to "/a/b/c" would be
formatted like::
str.format("/{1}/{0}/{2}", "a", "b", "c") # -> "/b/a/c"
Use Python's :ref:`format string syntax <formatstrings>` to customize how
values are substituted.
.. versionchanged:: 4.5
Added support for substitutions into the destination URL.
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self, *args):
self.redirect(self._url.format(*args), permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
    the ``path`` argument to the get() method (different from the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: typing.Dict
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified,
content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
        and ``absolute_path`` is the result of `get_absolute_path`.
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
# os.path.abspath strips a trailing /.
# We must add it back to `root` so that we only match files
# in a directory named `root` instead of files starting with
# that prefix.
root = os.path.abspath(root)
if not root.endswith(os.path.sep):
# abspath always removes a trailing slash, except when
# root is '/'. This is an unusual case, but several projects
# have independently discovered this technique to disable
# Tornado's path validation and (hopefully) do their own,
# so we need to support it.
root += os.path.sep
# The trailing slash also needs to be temporarily added back
# the requested path so a request to root/ will match.
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
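    # Illustrative sketch (hypothetical subclass; _DB stands in for a real
    # data store): replacing the filesystem via the hooks named above. A
    # complete subclass would also override get_content_size,
    # get_modified_time and validate_absolute_path.
    #
    #     class InMemoryFileHandler(StaticFileHandler):
    #         _DB = {"hello.txt": b"hello world"}
    #
    #         @classmethod
    #         def get_absolute_path(cls, root, path):
    #             return path  # any unique string can serve as a "path"
    #
    #         @classmethod
    #         def get_content(cls, abspath, start=None, end=None):
    #             return cls._DB[abspath][start:end]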
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
        cacheable for that amount of time or 0 to mark the resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
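    # Illustrative sketch (hypothetical CDN host): overriding make_static_url
    # to emit absolute urls for another server while reusing get_version for
    # the cache-busting hash.
    #
    #     class CDNStaticFileHandler(StaticFileHandler):
    #         @classmethod
    #         def make_static_url(cls, settings, path, include_version=True):
    #             url = "https://cdn.example.com/" + path
    #             if include_version:
    #                 version_hash = cls.get_version(settings, path)
    #                 if version_hash:
    #                     return "%s?v=%s" % (url, version_hash)
    #             return url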
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml",
"image/svg+xml"])
# Python's GzipFile defaults to level 9, while most other gzip
# tools (including gzip itself) default to 6, which is probably a
# better CPU/size tradeoff.
GZIP_LEVEL = 6
# Responses that are too short are unlikely to benefit from gzipping
# after considering the "Content-Encoding: gzip" header and the header
# inside the gzip encoding.
# Note that responses written in multiple chunks will be compressed
# regardless of size.
MIN_LENGTH = 1024
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
# TODO: can/should this type be inherited from the superclass?
if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding'
else:
headers['Vary'] = 'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
compresslevel=self.GZIP_LEVEL)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
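# Note: this transform is installed automatically when the application is
# constructed with compress_response=True (or the older gzip=True setting);
# handlers need no changes. A minimal sketch, with `handlers` assumed:
#
#     app = Application(handlers, compress_response=True)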
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
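# Illustrative sketch (the cookie name is an assumption): @authenticated in
# use. When get_current_user returns a falsy value, GET/HEAD requests are
# redirected to the configured login url and other methods get a 403.
class _ExampleProfileHandler(RequestHandler):
    def get_current_user(self):
        return self.get_secure_cookie("user")

    @authenticated
    def get(self):
        self.write("hello, authenticated user")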
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
Subclasses of UIModule must override the `render` method.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Override in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Override to return a JavaScript string
to be embedded in the page."""
return None
def javascript_files(self):
"""Override to return a list of JavaScript files needed by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def embedded_css(self):
"""Override to return a CSS string
that will be embedded in the page."""
return None
def css_files(self):
"""Override to returns a list of CSS files required by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def html_head(self):
"""Override to return an HTML string that will be put in the <head/>
element.
"""
return None
def html_body(self):
"""Override to return an HTML string that will be put at the end of
the <body/> element.
"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
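# Illustrative sketch (hypothetical template path): a concrete UIModule that
# renders a template and contributes page-level CSS via the hooks above.
class _ExampleEntryModule(UIModule):
    def render(self, entry):
        return self.render_string("modules/entry.html", entry=entry)

    def embedded_css(self):
        return ".entry { margin: 1em 0; }"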
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
key_version=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (integer, default is 0)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2",
format_field(str(key_version or 0)),
format_field(timestamp),
format_field(name),
format_field(value),
b''])
if isinstance(secret, dict):
assert key_version is not None, 'Key version must be set when sign key dict is used'
assert version >= 2, 'Version must be at least 2 for key version support'
secret = secret[key_version]
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
value = utf8(value)
version = _get_version(value)
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value,
max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value,
max_age_days, clock)
else:
return None
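# Illustrative sketch (the secret and names are assumptions): a round trip
# through the helpers above. decode_signed_value returns the original bytes,
# or None on any failure (bad signature, wrong name, expiry, old version).
def _example_signed_value_round_trip():
    secret = "not-a-real-secret"
    signed = create_signed_value(secret, "session", "42")
    assert decode_signed_value(secret, "session", signed) == b"42"
    assert decode_signed_value(secret, "other-name", signed) is None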
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r",
value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
try:
key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
except ValueError:
return None
signed_string = value[:-len(passed_sig)]
if isinstance(secret, dict):
try:
secret = secret[key_version]
except KeyError:
return None
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def get_signature_key_version(value):
value = utf8(value)
version = _get_version(value)
if version < 2:
return None
try:
key_version, _, _, _, _ = _decode_fields_v2(value)
except ValueError:
return None
return key_version
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
| BugsInPy/BugsInPy/temp/projects/tornado/bug-10-fixed/tornado/tornado/web.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-10-buggy/tornado/tornado/web.py |
tornado-bug-1 | """Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
import abc
import asyncio
import base64
import hashlib
import os
import sys
import struct
import tornado.escape
import tornado.web
from urllib.parse import urlparse
import zlib
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str, to_unicode
from tornado import gen, httpclient, httputil
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.iostream import StreamClosedError, IOStream
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.queues import Queue
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
from typing import (
TYPE_CHECKING,
cast,
Any,
Optional,
Dict,
Union,
List,
Awaitable,
Callable,
Tuple,
Type,
)
from types import TracebackType
if TYPE_CHECKING:
from typing_extensions import Protocol
# The zlib compressor types aren't actually exposed anywhere
# publicly, so declare protocols for the portions we use.
class _Compressor(Protocol):
def compress(self, data: bytes) -> bytes:
pass
def flush(self, mode: int) -> bytes:
pass
class _Decompressor(Protocol):
unconsumed_tail = b"" # type: bytes
def decompress(self, data: bytes, max_length: int) -> bytes:
pass
class _WebSocketDelegate(Protocol):
# The common base interface implemented by WebSocketHandler on
# the server side and WebSocketClientConnection on the client
# side.
def on_ws_connection_close(
self, close_code: int = None, close_reason: str = None
) -> None:
pass
def on_message(self, message: Union[str, bytes]) -> Optional["Awaitable[None]"]:
pass
def on_ping(self, data: bytes) -> None:
pass
def on_pong(self, data: bytes) -> None:
pass
def log_exception(
self,
typ: Optional[Type[BaseException]],
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
pass
_default_max_message_size = 10 * 1024 * 1024
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class _DecompressTooLargeError(Exception):
pass
class _WebSocketParams(object):
def __init__(
self,
ping_interval: float = None,
ping_timeout: float = None,
max_message_size: int = _default_max_message_size,
compression_options: Dict[str, Any] = None,
) -> None:
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.max_message_size = max_message_size
self.compression_options = compression_options
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
Custom upgrade response headers can be sent by overriding
`~tornado.web.RequestHandler.set_default_headers` or
`~tornado.web.RequestHandler.prepare`.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
    the `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
If the application setting ``websocket_ping_interval`` has a non-zero
value, a ping will be sent periodically, and the connection will be
closed if a response is not received before the ``websocket_ping_timeout``.
Messages larger than the ``websocket_max_message_size`` application setting
(default 10MiB) will not be accepted.
.. versionchanged:: 4.5
Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
``websocket_max_message_size``.
"""
def __init__(
self,
application: tornado.web.Application,
request: httputil.HTTPServerRequest,
**kwargs: Any
) -> None:
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None # type: Optional[WebSocketProtocol]
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.stream = None # type: Optional[IOStream]
self._on_close_called = False
async def get(self, *args: Any, **kwargs: Any) -> None:
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != "websocket":
self.set_status(400)
log_msg = 'Can "Upgrade" only to "WebSocket".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(
lambda s: s.strip().lower(), headers.get("Connection", "").split(",")
)
if "upgrade" not in connection:
self.set_status(400)
log_msg = '"Connection" must be "Upgrade".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
await self.ws_connection.accept_connection(self)
else:
self.set_status(426, "Upgrade Required")
self.set_header("Sec-WebSocket-Version", "7, 8, 13")
stream = None
@property
def ping_interval(self) -> Optional[float]:
"""The interval for websocket keep-alive pings.
Set websocket_ping_interval = 0 to disable pings.
"""
return self.settings.get("websocket_ping_interval", None)
@property
def ping_timeout(self) -> Optional[float]:
"""If no ping is received in this many seconds,
close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
        The default is the greater of three ping intervals or 30 seconds.
"""
return self.settings.get("websocket_ping_timeout", None)
@property
def max_message_size(self) -> int:
"""Maximum allowed message size.
If the remote peer sends a message larger than this, the connection
will be closed.
Default is 10MiB.
"""
return self.settings.get(
"websocket_max_message_size", _default_max_message_size
)
def write_message(
self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Consistently raises `WebSocketClosedError`. Previously could
sometimes raise `.StreamClosedError`.
"""
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
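    # Flow-control sketch (illustrative, not part of the class): awaiting the
    # returned Future from a coroutine applies backpressure when the peer
    # reads slowly.
    #
    #     async def on_message(self, message):
    #         await self.write_message(u"You said: " + message)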
def select_subprotocol(self, subprotocols: List[str]) -> Optional[str]:
"""Override to implement subprotocol negotiation.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol.
Failure to select a subprotocol does not automatically abort
the connection, although clients may close the connection if
none of their proposed subprotocols was selected.
The list may be empty, in which case this method must return
None. This method is always called exactly once even if no
subprotocols were proposed so that the handler can be advised
of this fact.
.. versionchanged:: 5.1
Previously, this method was called with a list containing
an empty string instead of an empty list if no subprotocols
were proposed by the client.
"""
return None
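    # Minimal negotiation sketch (the protocol names are hypothetical):
    # return the first server-preferred protocol the client proposed.
    #
    #     def select_subprotocol(self, subprotocols):
    #         for proto in ("chat.v2", "chat.v1"):
    #             if proto in subprotocols:
    #                 return proto
    #         return None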
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol returned by `select_subprotocol`.
.. versionadded:: 5.1
"""
assert self.ws_connection is not None
return self.ws_connection.selected_subprotocol
def get_compression_options(self) -> Optional[Dict[str, Any]]:
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the following compression options:
``compression_level`` specifies the compression level.
``mem_level`` specifies the amount of memory used for the internal compression state.
        These parameters are documented in detail here:
https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
.. versionadded:: 4.1
.. versionchanged:: 4.5
Added ``compression_level`` and ``mem_level``.
"""
# TODO: Add wbits option.
return None
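    # Sketch of enabling permessage-deflate with explicit tuning (the values
    # are illustrative; see the zlib documentation linked above):
    #
    #     def get_compression_options(self):
    #         return {"compression_level": 6, "mem_level": 5}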
def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
`open` may be a coroutine. `on_message` will not be called until
`open` has returned.
.. versionchanged:: 5.1
``open`` may be a coroutine.
"""
pass
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
"""Handle incoming messages on the WebSocket
This method must be overridden.
.. versionchanged:: 4.5
``on_message`` can be a coroutine.
"""
raise NotImplementedError
def ping(self, data: Union[str, bytes] = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``websocket_ping_interval`` application
setting instead of sending pings manually.
.. versionchanged:: 5.1
The data argument is now optional.
"""
data = utf8(data)
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data: bytes) -> None:
"""Invoked when the response to a ping frame is received."""
pass
def on_ping(self, data: bytes) -> None:
"""Invoked when the a ping frame is received."""
pass
def on_close(self) -> None:
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code: int = None, reason: str = None) -> None:
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin: str) -> bool:
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return ``True`` to accept the request or ``False`` to
reject it. By default, rejects all requests with an origin on
a host other than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
.. warning::
This is an important security measure; don't disable it
without understanding the security implications. In
particular, if your authentication is cookie-based, you
must either restrict the origins allowed by
``check_origin()`` or implement your own XSRF-like
protection for websocket connections. See `these
<https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
`articles
<https://devcenter.heroku.com/articles/websocket-security>`_
for more.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return ``True``::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
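    # Another common hardening sketch (hypothetical hosts): accept only an
    # explicit whitelist of origins instead of matching subdomains.
    #
    #     def check_origin(self, origin):
    #         allowed = {"app.example.com", "admin.example.com"}
    #         return urlparse(origin).netloc.lower() in allowed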
def set_nodelay(self, value: bool) -> None:
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
assert self.stream is not None
self.stream.set_nodelay(value)
def on_connection_close(self) -> None:
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
self._break_cycles()
def on_ws_connection_close(
self, close_code: int = None, close_reason: str = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _break_cycles(self) -> None:
# WebSocketHandlers call finish() early, but we don't want to
# break up reference cycles (which makes it impossible to call
# self.render_string) until after we've really closed the
# connection (if it was established in the first place,
# indicated by status code 101).
if self.get_status() != 101 or self._on_close_called:
super(WebSocketHandler, self)._break_cycles()
def send_error(self, *args: Any, **kwargs: Any) -> None:
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self) -> Optional["WebSocketProtocol"]:
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
params = _WebSocketParams(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout,
max_message_size=self.max_message_size,
compression_options=self.get_compression_options(),
)
return WebSocketProtocol13(self, False, params)
return None
def _detach_stream(self) -> IOStream:
# disable non-WS methods
for method in [
"write",
"redirect",
"set_header",
"set_cookie",
"set_status",
"flush",
"finish",
]:
setattr(self, method, _raise_not_supported_for_websockets)
return self.detach()
def _raise_not_supported_for_websockets(*args: Any, **kwargs: Any) -> None:
raise RuntimeError("Method not supported for Web Sockets")
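# Illustrative wiring sketch (not part of this module): how the application
# settings read by ping_interval, ping_timeout and max_message_size above are
# supplied. The route and values are hypothetical.
def _example_application() -> tornado.web.Application:
    class _EchoHandler(WebSocketHandler):
        def on_message(self, message: Union[str, bytes]) -> None:
            self.write_message(message)
    return tornado.web.Application(
        [(r"/ws", _EchoHandler)],
        websocket_ping_interval=10,  # seconds between keep-alive pings
        websocket_ping_timeout=30,  # close if no pong arrives in this window
        websocket_max_message_size=1024 * 1024,  # 1 MiB instead of 10 MiB
    )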
class WebSocketProtocol(abc.ABC):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler: "_WebSocketDelegate") -> None:
self.handler = handler
self.stream = None # type: Optional[IOStream]
self.client_terminated = False
self.server_terminated = False
def _run_callback(
self, callback: Callable, *args: Any, **kwargs: Any
) -> "Optional[Future[Any]]":
"""Runs the given callback with exception handling.
If the callback is a coroutine, returns its Future. On error, aborts the
websocket connection and returns None.
"""
try:
result = callback(*args, **kwargs)
except Exception:
self.handler.log_exception(*sys.exc_info())
self._abort()
return None
else:
if result is not None:
result = gen.convert_yielded(result)
assert self.stream is not None
self.stream.io_loop.add_future(result, lambda f: f.result())
return result
def on_connection_close(self) -> None:
self._abort()
def _abort(self) -> None:
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
if self.stream is not None:
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
@abc.abstractmethod
def close(self, code: int = None, reason: str = None) -> None:
raise NotImplementedError()
@abc.abstractmethod
def is_closing(self) -> bool:
raise NotImplementedError()
@abc.abstractmethod
async def accept_connection(self, handler: WebSocketHandler) -> None:
raise NotImplementedError()
@abc.abstractmethod
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
raise NotImplementedError()
@property
@abc.abstractmethod
def selected_subprotocol(self) -> Optional[str]:
raise NotImplementedError()
@abc.abstractmethod
def write_ping(self, data: bytes) -> None:
raise NotImplementedError()
# The entry points below are used by WebSocketClientConnection,
    # which was introduced back when we only supported a single version of
# WebSocketProtocol. The WebSocketProtocol/WebSocketProtocol13
# boundary is currently pretty ad-hoc.
@abc.abstractmethod
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def start_pinging(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
async def _receive_frame_loop(self) -> None:
raise NotImplementedError()
class _PerMessageDeflateCompressor(object):
def __init__(
self,
persistent: bool,
max_wbits: Optional[int],
compression_options: Dict[str, Any] = None,
) -> None:
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
            raise ValueError(
                "Invalid max_wbits value %r; allowed range 8-%d"
                % (max_wbits, zlib.MAX_WBITS)
            )
self._max_wbits = max_wbits
if (
compression_options is None
or "compression_level" not in compression_options
):
self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
else:
self._compression_level = compression_options["compression_level"]
if compression_options is None or "mem_level" not in compression_options:
self._mem_level = 8
else:
self._mem_level = compression_options["mem_level"]
if persistent:
self._compressor = self._create_compressor() # type: Optional[_Compressor]
else:
self._compressor = None
def _create_compressor(self) -> "_Compressor":
return zlib.compressobj(
self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level
)
def compress(self, data: bytes) -> bytes:
compressor = self._compressor or self._create_compressor()
data = compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH)
assert data.endswith(b"\x00\x00\xff\xff")
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(
self,
persistent: bool,
max_wbits: Optional[int],
max_message_size: int,
compression_options: Dict[str, Any] = None,
) -> None:
self._max_message_size = max_message_size
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
            raise ValueError(
                "Invalid max_wbits value %r; allowed range 8-%d"
                % (max_wbits, zlib.MAX_WBITS)
            )
self._max_wbits = max_wbits
if persistent:
self._decompressor = (
self._create_decompressor()
) # type: Optional[_Decompressor]
else:
self._decompressor = None
def _create_decompressor(self) -> "_Decompressor":
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data: bytes) -> bytes:
decompressor = self._decompressor or self._create_decompressor()
result = decompressor.decompress(
data + b"\x00\x00\xff\xff", self._max_message_size
)
if decompressor.unconsumed_tail:
raise _DecompressTooLargeError()
return result
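# Standalone sketch of the permessage-deflate framing used by the two classes
# above (RFC 7692): compress with Z_SYNC_FLUSH, strip the trailing
# b"\x00\x00\xff\xff", then re-append it before decompressing. stdlib only.
def _example_deflate_roundtrip(payload: bytes) -> bytes:
    compressor = zlib.compressobj(
        zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS
    )
    data = compressor.compress(payload) + compressor.flush(zlib.Z_SYNC_FLUSH)
    assert data.endswith(b"\x00\x00\xff\xff")
    wire = data[:-4]  # what is actually carried in the frame payload
    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
    restored = decompressor.decompress(wire + b"\x00\x00\xff\xff")
    assert restored == payload
    return restored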
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0F
stream = None # type: IOStream
def __init__(
self,
handler: "_WebSocketDelegate",
mask_outgoing: bool,
params: _WebSocketParams,
) -> None:
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self.params = params
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None # type: Optional[bytes]
self._frame_length = None
self._fragmented_message_buffer = None # type: Optional[bytes]
self._fragmented_message_opcode = None
self._waiting = None # type: object
self._compression_options = params.compression_options
self._decompressor = None # type: Optional[_PerMessageDeflateDecompressor]
self._compressor = None # type: Optional[_PerMessageDeflateCompressor]
self._frame_compressed = None # type: Optional[bool]
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
self.ping_callback = None # type: Optional[PeriodicCallback]
self.last_ping = 0.0
self.last_pong = 0.0
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
# Use a property for this to satisfy the abc.
@property
def selected_subprotocol(self) -> Optional[str]:
return self._selected_subprotocol
@selected_subprotocol.setter
def selected_subprotocol(self, value: Optional[str]) -> None:
self._selected_subprotocol = value
async def accept_connection(self, handler: WebSocketHandler) -> None:
try:
self._handle_websocket_headers(handler)
except ValueError:
handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
await self._accept_connection(handler)
except asyncio.CancelledError:
self._abort()
return
except ValueError:
gen_log.debug("Malformed WebSocket request received", exc_info=True)
self._abort()
return
def _handle_websocket_headers(self, handler: WebSocketHandler) -> None:
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: handler.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key: Union[str, bytes]) -> str:
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
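    # Worked example from RFC 6455 section 1.3:
    #   compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")
    #   returns "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="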
def _challenge_response(self, handler: WebSocketHandler) -> str:
return WebSocketProtocol13.compute_accept_value(
cast(str, handler.request.headers.get("Sec-Websocket-Key"))
)
async def _accept_connection(self, handler: WebSocketHandler) -> None:
subprotocol_header = handler.request.headers.get("Sec-WebSocket-Protocol")
if subprotocol_header:
subprotocols = [s.strip() for s in subprotocol_header.split(",")]
else:
subprotocols = []
self.selected_subprotocol = handler.select_subprotocol(subprotocols)
if self.selected_subprotocol:
assert self.selected_subprotocol in subprotocols
handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
extensions = self._parse_extensions_header(handler.request.headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors("server", ext[1], self._compression_options)
if (
"client_max_window_bits" in ext[1]
and ext[1]["client_max_window_bits"] is None
):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]["client_max_window_bits"]
handler.set_header(
"Sec-WebSocket-Extensions",
httputil._encode_header("permessage-deflate", ext[1]),
)
break
handler.clear_header("Content-Type")
handler.set_status(101)
handler.set_header("Upgrade", "websocket")
handler.set_header("Connection", "Upgrade")
handler.set_header("Sec-WebSocket-Accept", self._challenge_response(handler))
handler.finish()
self.stream = handler._detach_stream()
self.start_pinging()
try:
open_result = handler.open(*handler.open_args, **handler.open_kwargs)
if open_result is not None:
await open_result
except Exception:
handler.log_exception(*sys.exc_info())
self._abort()
return
await self._receive_frame_loop()
def _parse_extensions_header(
self, headers: httputil.HTTPHeaders
) -> List[Tuple[str, Dict[str, str]]]:
extensions = headers.get("Sec-WebSocket-Extensions", "")
if extensions:
return [httputil._parse_header(e.strip()) for e in extensions.split(",")]
return []
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers["Upgrade"].lower() == "websocket"
assert headers["Connection"].lower() == "upgrade"
accept = self.compute_accept_value(key)
assert headers["Sec-Websocket-Accept"] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
self._create_compressors("client", ext[1])
else:
raise ValueError("unsupported extension %r", ext)
self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
def _get_compressor_options(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Dict[str, Any] = None,
) -> Dict[str, Any]:
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + "_no_context_takeover") not in agreed_parameters
) # type: Dict[str, Any]
wbits_header = agreed_parameters.get(side + "_max_window_bits", None)
if wbits_header is None:
options["max_wbits"] = zlib.MAX_WBITS
else:
options["max_wbits"] = int(wbits_header)
options["compression_options"] = compression_options
return options
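    # For example, side="client" with agreed parameters
    # {"client_max_window_bits": "12", "client_no_context_takeover": None}
    # yields {"persistent": False, "max_wbits": 12,
    # "compression_options": compression_options}.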
def _create_compressors(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Dict[str, Any] = None,
) -> None:
# TODO: handle invalid parameters gracefully
allowed_keys = set(
[
"server_no_context_takeover",
"client_no_context_takeover",
"server_max_window_bits",
"client_max_window_bits",
]
)
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = "client" if (side == "server") else "server"
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters, compression_options)
)
self._decompressor = _PerMessageDeflateDecompressor(
max_message_size=self.params.max_message_size,
**self._get_compressor_options(
other_side, agreed_parameters, compression_options
)
)
def _write_frame(
self, fin: bool, opcode: int, data: bytes, flags: int = 0
) -> "Future[None]":
data_len = len(data)
if opcode & 0x8:
# All control frames MUST have a payload length of 125
# bytes or less and MUST NOT be fragmented.
if not fin:
raise ValueError("control frames may not be fragmented")
if data_len > 125:
raise ValueError("control frame payloads may not exceed 125 bytes")
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if data_len < 126:
frame += struct.pack("B", data_len | mask_bit)
elif data_len <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, data_len)
else:
frame += struct.pack("!BQ", 127 | mask_bit, data_len)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
return self.stream.write(frame)
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
# For historical reasons, write methods in Tornado operate in a semi-synchronous
        # mode in which awaiting the Future they return is optional (but errors can
# still be raised). This requires us to go through an awkward dance here
# to transform the errors that may be returned while presenting the same
# semi-synchronous interface.
try:
fut = self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
raise WebSocketClosedError()
async def wrapper() -> None:
try:
await fut
except StreamClosedError:
raise WebSocketClosedError()
return asyncio.ensure_future(wrapper())
def write_ping(self, data: bytes) -> None:
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
async def _receive_frame_loop(self) -> None:
try:
while not self.client_terminated:
await self._receive_frame()
except StreamClosedError:
self._abort()
self.handler.on_ws_connection_close(self.close_code, self.close_reason)
async def _read_bytes(self, n: int) -> bytes:
data = await self.stream.read_bytes(n)
self._wire_bytes_in += n
return data
async def _receive_frame(self) -> None:
# Read the frame header.
data = await self._read_bytes(2)
header, mask_payloadlen = struct.unpack("BB", data)
is_final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
opcode = header & self.OPCODE_MASK
opcode_is_control = opcode & 0x8
if self._decompressor is not None and opcode != 0:
# Compression flag is present in the first frame's header,
# but we can't decompress until we have all the frames of
# the message.
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
is_masked = bool(mask_payloadlen & 0x80)
payloadlen = mask_payloadlen & 0x7F
# Parse and validate the length.
if opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
if payloadlen < 126:
self._frame_length = payloadlen
elif payloadlen == 126:
data = await self._read_bytes(2)
payloadlen = struct.unpack("!H", data)[0]
elif payloadlen == 127:
data = await self._read_bytes(8)
payloadlen = struct.unpack("!Q", data)[0]
new_len = payloadlen
if self._fragmented_message_buffer is not None:
new_len += len(self._fragmented_message_buffer)
if new_len > self.params.max_message_size:
self.close(1009, "message too big")
self._abort()
return
# Read the payload, unmasking if necessary.
if is_masked:
self._frame_mask = await self._read_bytes(4)
data = await self._read_bytes(payloadlen)
if is_masked:
assert self._frame_mask is not None
data = _websocket_mask(self._frame_mask, data)
# Decide what to do with this frame.
if opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not is_final_frame:
# control frames must not be fragmented
self._abort()
return
elif opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if is_final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if not is_final_frame:
self._fragmented_message_opcode = opcode
self._fragmented_message_buffer = data
if is_final_frame:
handled_future = self._handle_message(opcode, data)
if handled_future is not None:
await handled_future
def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]":
"""Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated:
return None
if self._frame_compressed:
assert self._decompressor is not None
try:
data = self._decompressor.decompress(data)
except _DecompressTooLargeError:
self.close(1009, "message too big after decompression")
self._abort()
return None
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return None
return self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
return self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.close_code = struct.unpack(">H", data[:2])[0]
if len(data) > 2:
self.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.close_code)
elif opcode == 0x9:
# Ping
try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
self.last_pong = IOLoop.current().time()
return self._run_callback(self.handler.on_pong, data)
else:
self._abort()
return None
def close(self, code: int = None, reason: str = None) -> None:
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b""
else:
close_data = struct.pack(">H", code)
if reason is not None:
close_data += utf8(reason)
try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort
)
def is_closing(self) -> bool:
"""Return ``True`` if this connection is closing.
The connection is considered closing if either side has
initiated its closing handshake or if the stream has been
shut down uncleanly.
"""
return self.stream.closed() or self.client_terminated or self.server_terminated
@property
def ping_interval(self) -> Optional[float]:
interval = self.params.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self) -> Optional[float]:
timeout = self.params.ping_timeout
if timeout is not None:
return timeout
assert self.ping_interval is not None
return max(3 * self.ping_interval, 30)
def start_pinging(self) -> None:
"""Start sending periodic pings to keep the connection alive"""
assert self.ping_interval is not None
if self.ping_interval > 0:
self.last_ping = self.last_pong = IOLoop.current().time()
self.ping_callback = PeriodicCallback(
self.periodic_ping, self.ping_interval * 1000
)
self.ping_callback.start()
def periodic_ping(self) -> None:
"""Send a ping to keep the websocket alive
Called periodically if the websocket_ping_interval is set and non-zero.
"""
if self.is_closing() and self.ping_callback is not None:
self.ping_callback.stop()
return
# Check for timeout on pong. Make sure that we really have
# sent a recent ping in case the machine with both server and
# client has been suspended since the last ping.
now = IOLoop.current().time()
since_last_pong = now - self.last_pong
since_last_ping = now - self.last_ping
assert self.ping_interval is not None
assert self.ping_timeout is not None
if (
since_last_ping < 2 * self.ping_interval
and since_last_pong > self.ping_timeout
):
self.close()
return
self.write_ping(b"")
self.last_ping = now
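# Standalone sketch of the RFC 6455 length encoding implemented by
# _write_frame above (unmasked/server side): 7-bit direct, 16-bit with the
# 126 marker, or 64-bit with the 127 marker.
def _example_frame_header(fin: bool, opcode: int, payload_len: int) -> bytes:
    header = struct.pack("B", (WebSocketProtocol13.FIN if fin else 0) | opcode)
    if payload_len < 126:
        header += struct.pack("B", payload_len)
    elif payload_len <= 0xFFFF:
        header += struct.pack("!BH", 126, payload_len)
    else:
        header += struct.pack("!BQ", 127, payload_len)
    return header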
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
protocol = None # type: WebSocketProtocol
def __init__(
self,
request: httpclient.HTTPRequest,
on_message_callback: Callable[[Union[None, str, bytes]], None] = None,
compression_options: Dict[str, Any] = None,
ping_interval: float = None,
ping_timeout: float = None,
max_message_size: int = _default_max_message_size,
subprotocols: Optional[List[str]] = [],
) -> None:
self.connect_future = Future() # type: Future[WebSocketClientConnection]
self.read_queue = Queue(1) # type: Queue[Union[None, str, bytes]]
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.params = _WebSocketParams(
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
compression_options=compression_options,
)
scheme, sep, rest = request.url.partition(":")
scheme = {"ws": "http", "wss": "https"}[scheme]
request.url = scheme + sep + rest
request.headers.update(
{
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-WebSocket-Key": self.key,
"Sec-WebSocket-Version": "13",
}
)
if subprotocols is not None:
request.headers["Sec-WebSocket-Protocol"] = ",".join(subprotocols)
if compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers[
"Sec-WebSocket-Extensions"
] = "permessage-deflate; client_max_window_bits"
self.tcp_client = TCPClient()
super(WebSocketClientConnection, self).__init__(
None,
request,
lambda: None,
self._on_http_response,
104857600,
self.tcp_client,
65536,
104857600,
)
def close(self, code: int = None, reason: str = None) -> None:
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None # type: ignore
def on_connection_close(self) -> None:
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self._on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def on_ws_connection_close(
self, close_code: int = None, close_reason: str = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _on_http_response(self, response: httpclient.HTTPResponse) -> None:
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(
WebSocketError("Non-websocket response")
)
async def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> None:
assert isinstance(start_line, httputil.ResponseStartLine)
if start_line.code != 101:
await super(WebSocketClientConnection, self).headers_received(
start_line, headers
)
return
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol.stream = self.connection.detach()
IOLoop.current().add_callback(self.protocol._receive_frame_loop)
self.protocol.start_pinging()
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None # type: ignore
future_set_result_unless_cancelled(self.connect_future, self)
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
"""Sends a message to the WebSocket server.
If the stream is closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Exception raised on a closed stream changed from `.StreamClosedError`
to `WebSocketClosedError`.
"""
return self.protocol.write_message(message, binary=binary)
def read_message(
self, callback: Callable[["Future[Union[None, str, bytes]]"], None] = None
) -> Awaitable[Union[None, str, bytes]]:
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
        initialization, this function will never return messages.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
awaitable = self.read_queue.get()
if callback is not None:
self.io_loop.add_future(asyncio.ensure_future(awaitable), callback)
return awaitable
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
return self._on_message(message)
def _on_message(
self, message: Union[None, str, bytes]
) -> Optional[Awaitable[None]]:
if self._on_message_callback:
self._on_message_callback(message)
return None
else:
return self.read_queue.put(message)
def ping(self, data: bytes = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``ping_interval`` argument to
`websocket_connect` instead of sending pings manually.
.. versionadded:: 5.1
"""
data = utf8(data)
if self.protocol is None:
raise WebSocketClosedError()
self.protocol.write_ping(data)
def on_pong(self, data: bytes) -> None:
pass
def on_ping(self, data: bytes) -> None:
pass
def get_websocket_protocol(self) -> WebSocketProtocol:
return WebSocketProtocol13(self, mask_outgoing=True, params=self.params)
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol selected by the server.
.. versionadded:: 5.1
"""
return self.protocol.selected_subprotocol
def log_exception(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
assert typ is not None
assert value is not None
app_log.error("Uncaught exception %s", value, exc_info=(typ, value, tb))
def websocket_connect(
url: Union[str, httpclient.HTTPRequest],
callback: Callable[["Future[WebSocketClientConnection]"], None] = None,
connect_timeout: float = None,
on_message_callback: Callable[[Union[None, str, bytes]], None] = None,
compression_options: Dict[str, Any] = None,
ping_interval: float = None,
ping_timeout: float = None,
max_message_size: int = _default_max_message_size,
subprotocols: List[str] = None,
) -> "Awaitable[WebSocketClientConnection]":
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
``subprotocols`` may be a list of strings specifying proposed
subprotocols. The selected protocol may be found on the
``selected_subprotocol`` attribute of the connection object
when the connection is complete.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.1
Added the ``subprotocols`` argument.
"""
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = cast(
httpclient.HTTPRequest,
httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS),
)
conn = WebSocketClientConnection(
request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
subprotocols=subprotocols,
)
if callback is not None:
IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future
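# Hedged usage sketch (the URL is hypothetical): the native-coroutine form of
# the yield-based loop shown in the websocket_connect docstring above.
async def _example_client(url: str = "ws://localhost:8888/ws") -> None:
    conn = await websocket_connect(url, ping_interval=10)
    await conn.write_message("hello")
    while True:
        msg = await conn.read_message()
        if msg is None:  # None means the connection was closed
            break
        # process msg here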
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
import abc
import asyncio
import base64
import hashlib
import os
import sys
import struct
import tornado.escape
import tornado.web
from urllib.parse import urlparse
import zlib
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str, to_unicode
from tornado import gen, httpclient, httputil
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.iostream import StreamClosedError, IOStream
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.queues import Queue
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
from typing import (
TYPE_CHECKING,
cast,
Any,
Optional,
Dict,
Union,
List,
Awaitable,
Callable,
Tuple,
Type,
)
from types import TracebackType
if TYPE_CHECKING:
from typing_extensions import Protocol
# The zlib compressor types aren't actually exposed anywhere
# publicly, so declare protocols for the portions we use.
class _Compressor(Protocol):
def compress(self, data: bytes) -> bytes:
pass
def flush(self, mode: int) -> bytes:
pass
class _Decompressor(Protocol):
unconsumed_tail = b"" # type: bytes
def decompress(self, data: bytes, max_length: int) -> bytes:
pass
class _WebSocketDelegate(Protocol):
# The common base interface implemented by WebSocketHandler on
# the server side and WebSocketClientConnection on the client
# side.
def on_ws_connection_close(
self, close_code: int = None, close_reason: str = None
) -> None:
pass
def on_message(self, message: Union[str, bytes]) -> Optional["Awaitable[None]"]:
pass
def on_ping(self, data: bytes) -> None:
pass
def on_pong(self, data: bytes) -> None:
pass
def log_exception(
self,
typ: Optional[Type[BaseException]],
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
pass
_default_max_message_size = 10 * 1024 * 1024
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class _DecompressTooLargeError(Exception):
pass
class _WebSocketParams(object):
def __init__(
self,
ping_interval: float = None,
ping_timeout: float = None,
max_message_size: int = _default_max_message_size,
compression_options: Dict[str, Any] = None,
) -> None:
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.max_message_size = max_message_size
self.compression_options = compression_options
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
Custom upgrade response headers can be sent by overriding
`~tornado.web.RequestHandler.set_default_headers` or
`~tornado.web.RequestHandler.prepare`.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
    `close()`. Likewise, your request handler class should implement the
    `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
If the application setting ``websocket_ping_interval`` has a non-zero
value, a ping will be sent periodically, and the connection will be
closed if a response is not received before the ``websocket_ping_timeout``.
Messages larger than the ``websocket_max_message_size`` application setting
(default 10MiB) will not be accepted.
.. versionchanged:: 4.5
Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
``websocket_max_message_size``.
"""
def __init__(
self,
application: tornado.web.Application,
request: httputil.HTTPServerRequest,
**kwargs: Any
) -> None:
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None # type: Optional[WebSocketProtocol]
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.stream = None # type: Optional[IOStream]
self._on_close_called = False
async def get(self, *args: Any, **kwargs: Any) -> None:
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != "websocket":
self.set_status(400)
log_msg = 'Can "Upgrade" only to "WebSocket".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(
lambda s: s.strip().lower(), headers.get("Connection", "").split(",")
)
if "upgrade" not in connection:
self.set_status(400)
log_msg = '"Connection" must be "Upgrade".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
await self.ws_connection.accept_connection(self)
else:
self.set_status(426, "Upgrade Required")
self.set_header("Sec-WebSocket-Version", "7, 8, 13")
stream = None
@property
def ping_interval(self) -> Optional[float]:
"""The interval for websocket keep-alive pings.
Set websocket_ping_interval = 0 to disable pings.
"""
return self.settings.get("websocket_ping_interval", None)
@property
def ping_timeout(self) -> Optional[float]:
"""If no ping is received in this many seconds,
close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
        Default is the larger of 3 ping intervals or 30 seconds.
"""
return self.settings.get("websocket_ping_timeout", None)
@property
def max_message_size(self) -> int:
"""Maximum allowed message size.
If the remote peer sends a message larger than this, the connection
will be closed.
Default is 10MiB.
"""
return self.settings.get(
"websocket_max_message_size", _default_max_message_size
)
def write_message(
self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Consistently raises `WebSocketClosedError`. Previously could
sometimes raise `.StreamClosedError`.
"""
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols: List[str]) -> Optional[str]:
"""Override to implement subprotocol negotiation.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol.
Failure to select a subprotocol does not automatically abort
the connection, although clients may close the connection if
none of their proposed subprotocols was selected.
The list may be empty, in which case this method must return
None. This method is always called exactly once even if no
subprotocols were proposed so that the handler can be advised
of this fact.
.. versionchanged:: 5.1
Previously, this method was called with a list containing
an empty string instead of an empty list if no subprotocols
were proposed by the client.
"""
return None
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol returned by `select_subprotocol`.
.. versionadded:: 5.1
"""
assert self.ws_connection is not None
return self.ws_connection.selected_subprotocol
def get_compression_options(self) -> Optional[Dict[str, Any]]:
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the following compression options:
``compression_level`` specifies the compression level.
``mem_level`` specifies the amount of memory used for the internal compression state.
        These parameters are documented in detail here:
https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
.. versionadded:: 4.1
.. versionchanged:: 4.5
Added ``compression_level`` and ``mem_level``.
"""
# TODO: Add wbits option.
return None
def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
`open` may be a coroutine. `on_message` will not be called until
`open` has returned.
.. versionchanged:: 5.1
``open`` may be a coroutine.
"""
pass
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
"""Handle incoming messages on the WebSocket
This method must be overridden.
.. versionchanged:: 4.5
``on_message`` can be a coroutine.
"""
raise NotImplementedError
def ping(self, data: Union[str, bytes] = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``websocket_ping_interval`` application
setting instead of sending pings manually.
.. versionchanged:: 5.1
The data argument is now optional.
"""
data = utf8(data)
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data: bytes) -> None:
"""Invoked when the response to a ping frame is received."""
pass
def on_ping(self, data: bytes) -> None:
"""Invoked when the a ping frame is received."""
pass
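    # Round-trip-time sketch (illustrative): pair ping() with on_pong to
    # measure latency manually instead of relying on the periodic pings.
    #
    #     def open(self):
    #         self._ping_sent = IOLoop.current().time()
    #         self.ping(b"rtt")
    #
    #     def on_pong(self, data):
    #         rtt = IOLoop.current().time() - self._ping_sent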
def on_close(self) -> None:
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code: int = None, reason: str = None) -> None:
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin: str) -> bool:
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return ``True`` to accept the request or ``False`` to
reject it. By default, rejects all requests with an origin on
a host other than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
.. warning::
This is an important security measure; don't disable it
without understanding the security implications. In
particular, if your authentication is cookie-based, you
must either restrict the origins allowed by
``check_origin()`` or implement your own XSRF-like
protection for websocket connections. See `these
<https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
`articles
<https://devcenter.heroku.com/articles/websocket-security>`_
for more.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return ``True``::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value: bool) -> None:
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
assert self.ws_connection is not None
self.ws_connection.set_nodelay(value)
def on_connection_close(self) -> None:
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
self._break_cycles()
def on_ws_connection_close(
self, close_code: int = None, close_reason: str = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _break_cycles(self) -> None:
# WebSocketHandlers call finish() early, but we don't want to
# break up reference cycles (which makes it impossible to call
# self.render_string) until after we've really closed the
# connection (if it was established in the first place,
# indicated by status code 101).
if self.get_status() != 101 or self._on_close_called:
super(WebSocketHandler, self)._break_cycles()
def send_error(self, *args: Any, **kwargs: Any) -> None:
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self) -> Optional["WebSocketProtocol"]:
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
params = _WebSocketParams(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout,
max_message_size=self.max_message_size,
compression_options=self.get_compression_options(),
)
return WebSocketProtocol13(self, False, params)
return None
def _detach_stream(self) -> IOStream:
# disable non-WS methods
for method in [
"write",
"redirect",
"set_header",
"set_cookie",
"set_status",
"flush",
"finish",
]:
setattr(self, method, _raise_not_supported_for_websockets)
return self.detach()
def _raise_not_supported_for_websockets(*args: Any, **kwargs: Any) -> None:
raise RuntimeError("Method not supported for Web Sockets")
class WebSocketProtocol(abc.ABC):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler: "_WebSocketDelegate") -> None:
self.handler = handler
self.stream = None # type: Optional[IOStream]
self.client_terminated = False
self.server_terminated = False
def _run_callback(
self, callback: Callable, *args: Any, **kwargs: Any
) -> "Optional[Future[Any]]":
"""Runs the given callback with exception handling.
If the callback is a coroutine, returns its Future. On error, aborts the
websocket connection and returns None.
"""
try:
result = callback(*args, **kwargs)
except Exception:
self.handler.log_exception(*sys.exc_info())
self._abort()
return None
else:
if result is not None:
result = gen.convert_yielded(result)
assert self.stream is not None
self.stream.io_loop.add_future(result, lambda f: f.result())
return result
def on_connection_close(self) -> None:
self._abort()
def _abort(self) -> None:
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
if self.stream is not None:
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
@abc.abstractmethod
def close(self, code: int = None, reason: str = None) -> None:
raise NotImplementedError()
@abc.abstractmethod
def is_closing(self) -> bool:
raise NotImplementedError()
@abc.abstractmethod
async def accept_connection(self, handler: WebSocketHandler) -> None:
raise NotImplementedError()
@abc.abstractmethod
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
raise NotImplementedError()
@property
@abc.abstractmethod
def selected_subprotocol(self) -> Optional[str]:
raise NotImplementedError()
@abc.abstractmethod
def write_ping(self, data: bytes) -> None:
raise NotImplementedError()
# The entry points below are used by WebSocketClientConnection,
# which was introduced after we only supported a single version of
# WebSocketProtocol. The WebSocketProtocol/WebSocketProtocol13
# boundary is currently pretty ad-hoc.
@abc.abstractmethod
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def start_pinging(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
async def _receive_frame_loop(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def set_nodelay(self, x: bool) -> None:
raise NotImplementedError()
class _PerMessageDeflateCompressor(object):
def __init__(
self,
persistent: bool,
max_wbits: Optional[int],
compression_options: Dict[str, Any] = None,
) -> None:
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
            raise ValueError(
                "Invalid max_wbits value %r; allowed range 8-%d"
                % (max_wbits, zlib.MAX_WBITS)
            )
self._max_wbits = max_wbits
if (
compression_options is None
or "compression_level" not in compression_options
):
self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
else:
self._compression_level = compression_options["compression_level"]
if compression_options is None or "mem_level" not in compression_options:
self._mem_level = 8
else:
self._mem_level = compression_options["mem_level"]
if persistent:
self._compressor = self._create_compressor() # type: Optional[_Compressor]
else:
self._compressor = None
def _create_compressor(self) -> "_Compressor":
return zlib.compressobj(
self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level
)
def compress(self, data: bytes) -> bytes:
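        # Per RFC 7692, each message is compressed ending with a
        # Z_SYNC_FLUSH, which always terminates the output with an empty
        # deflate block (00 00 ff ff); that 4-byte tail is stripped here
        # and re-appended by the receiver before decompressing.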
compressor = self._compressor or self._create_compressor()
data = compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH)
assert data.endswith(b"\x00\x00\xff\xff")
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(
self,
persistent: bool,
max_wbits: Optional[int],
max_message_size: int,
compression_options: Dict[str, Any] = None,
) -> None:
self._max_message_size = max_message_size
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
            raise ValueError(
                "Invalid max_wbits value %r; allowed range 8-%d"
                % (max_wbits, zlib.MAX_WBITS)
            )
self._max_wbits = max_wbits
if persistent:
self._decompressor = (
self._create_decompressor()
) # type: Optional[_Decompressor]
else:
self._decompressor = None
def _create_decompressor(self) -> "_Decompressor":
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data: bytes) -> bytes:
decompressor = self._decompressor or self._create_decompressor()
result = decompressor.decompress(
data + b"\x00\x00\xff\xff", self._max_message_size
)
if decompressor.unconsumed_tail:
raise _DecompressTooLargeError()
return result
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0F
stream = None # type: IOStream
def __init__(
self,
handler: "_WebSocketDelegate",
mask_outgoing: bool,
params: _WebSocketParams,
) -> None:
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self.params = params
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None # type: Optional[bytes]
self._frame_length = None
self._fragmented_message_buffer = None # type: Optional[bytes]
self._fragmented_message_opcode = None
self._waiting = None # type: object
self._compression_options = params.compression_options
self._decompressor = None # type: Optional[_PerMessageDeflateDecompressor]
self._compressor = None # type: Optional[_PerMessageDeflateCompressor]
self._frame_compressed = None # type: Optional[bool]
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
self.ping_callback = None # type: Optional[PeriodicCallback]
self.last_ping = 0.0
self.last_pong = 0.0
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
# Use a property for this to satisfy the abc.
@property
def selected_subprotocol(self) -> Optional[str]:
return self._selected_subprotocol
@selected_subprotocol.setter
def selected_subprotocol(self, value: Optional[str]) -> None:
self._selected_subprotocol = value
async def accept_connection(self, handler: WebSocketHandler) -> None:
try:
self._handle_websocket_headers(handler)
except ValueError:
handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
await self._accept_connection(handler)
except asyncio.CancelledError:
self._abort()
return
except ValueError:
gen_log.debug("Malformed WebSocket request received", exc_info=True)
self._abort()
return
def _handle_websocket_headers(self, handler: WebSocketHandler) -> None:
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: handler.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key: Union[str, bytes]) -> str:
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
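        For example, the sample key from RFC 6455 section 1.3 produces the
        accept value shown there (an illustrative sketch)::
            WebSocketProtocol13.compute_accept_value(
                "dGhlIHNhbXBsZSBub25jZQ==")
            # -> "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="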
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
def _challenge_response(self, handler: WebSocketHandler) -> str:
return WebSocketProtocol13.compute_accept_value(
cast(str, handler.request.headers.get("Sec-Websocket-Key"))
)
async def _accept_connection(self, handler: WebSocketHandler) -> None:
subprotocol_header = handler.request.headers.get("Sec-WebSocket-Protocol")
if subprotocol_header:
subprotocols = [s.strip() for s in subprotocol_header.split(",")]
else:
subprotocols = []
self.selected_subprotocol = handler.select_subprotocol(subprotocols)
if self.selected_subprotocol:
assert self.selected_subprotocol in subprotocols
handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
extensions = self._parse_extensions_header(handler.request.headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors("server", ext[1], self._compression_options)
if (
"client_max_window_bits" in ext[1]
and ext[1]["client_max_window_bits"] is None
):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]["client_max_window_bits"]
handler.set_header(
"Sec-WebSocket-Extensions",
httputil._encode_header("permessage-deflate", ext[1]),
)
break
handler.clear_header("Content-Type")
handler.set_status(101)
handler.set_header("Upgrade", "websocket")
handler.set_header("Connection", "Upgrade")
handler.set_header("Sec-WebSocket-Accept", self._challenge_response(handler))
handler.finish()
self.stream = handler._detach_stream()
self.start_pinging()
try:
open_result = handler.open(*handler.open_args, **handler.open_kwargs)
if open_result is not None:
await open_result
except Exception:
handler.log_exception(*sys.exc_info())
self._abort()
return
await self._receive_frame_loop()
def _parse_extensions_header(
self, headers: httputil.HTTPHeaders
) -> List[Tuple[str, Dict[str, str]]]:
extensions = headers.get("Sec-WebSocket-Extensions", "")
if extensions:
return [httputil._parse_header(e.strip()) for e in extensions.split(",")]
return []
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers["Upgrade"].lower() == "websocket"
assert headers["Connection"].lower() == "upgrade"
accept = self.compute_accept_value(key)
assert headers["Sec-Websocket-Accept"] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
self._create_compressors("client", ext[1])
else:
raise ValueError("unsupported extension %r", ext)
self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
def _get_compressor_options(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Dict[str, Any] = None,
) -> Dict[str, Any]:
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
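        For example (an illustrative sketch of the mapping)::
            self._get_compressor_options(
                "server", {"server_max_window_bits": "10"})
            # -> {"persistent": True, "max_wbits": 10,
            #     "compression_options": None}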
"""
options = dict(
persistent=(side + "_no_context_takeover") not in agreed_parameters
) # type: Dict[str, Any]
wbits_header = agreed_parameters.get(side + "_max_window_bits", None)
if wbits_header is None:
options["max_wbits"] = zlib.MAX_WBITS
else:
options["max_wbits"] = int(wbits_header)
options["compression_options"] = compression_options
return options
def _create_compressors(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Dict[str, Any] = None,
) -> None:
# TODO: handle invalid parameters gracefully
allowed_keys = set(
[
"server_no_context_takeover",
"client_no_context_takeover",
"server_max_window_bits",
"client_max_window_bits",
]
)
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = "client" if (side == "server") else "server"
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters, compression_options)
)
self._decompressor = _PerMessageDeflateDecompressor(
max_message_size=self.params.max_message_size,
**self._get_compressor_options(
other_side, agreed_parameters, compression_options
)
)
def _write_frame(
self, fin: bool, opcode: int, data: bytes, flags: int = 0
) -> "Future[None]":
data_len = len(data)
if opcode & 0x8:
# All control frames MUST have a payload length of 125
# bytes or less and MUST NOT be fragmented.
if not fin:
raise ValueError("control frames may not be fragmented")
if data_len > 125:
raise ValueError("control frame payloads may not exceed 125 bytes")
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if data_len < 126:
frame += struct.pack("B", data_len | mask_bit)
elif data_len <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, data_len)
else:
frame += struct.pack("!BQ", 127 | mask_bit, data_len)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
return self.stream.write(frame)
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
# For historical reasons, write methods in Tornado operate in a semi-synchronous
        # mode in which awaiting the Future they return is optional (but errors can
# still be raised). This requires us to go through an awkward dance here
# to transform the errors that may be returned while presenting the same
# semi-synchronous interface.
try:
fut = self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
raise WebSocketClosedError()
async def wrapper() -> None:
try:
await fut
except StreamClosedError:
raise WebSocketClosedError()
return asyncio.ensure_future(wrapper())
def write_ping(self, data: bytes) -> None:
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
async def _receive_frame_loop(self) -> None:
try:
while not self.client_terminated:
await self._receive_frame()
except StreamClosedError:
self._abort()
self.handler.on_ws_connection_close(self.close_code, self.close_reason)
async def _read_bytes(self, n: int) -> bytes:
data = await self.stream.read_bytes(n)
self._wire_bytes_in += n
return data
async def _receive_frame(self) -> None:
# Read the frame header.
data = await self._read_bytes(2)
header, mask_payloadlen = struct.unpack("BB", data)
is_final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
opcode = header & self.OPCODE_MASK
opcode_is_control = opcode & 0x8
if self._decompressor is not None and opcode != 0:
# Compression flag is present in the first frame's header,
# but we can't decompress until we have all the frames of
# the message.
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
is_masked = bool(mask_payloadlen & 0x80)
payloadlen = mask_payloadlen & 0x7F
# Parse and validate the length.
if opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
if payloadlen < 126:
self._frame_length = payloadlen
elif payloadlen == 126:
data = await self._read_bytes(2)
payloadlen = struct.unpack("!H", data)[0]
elif payloadlen == 127:
data = await self._read_bytes(8)
payloadlen = struct.unpack("!Q", data)[0]
new_len = payloadlen
if self._fragmented_message_buffer is not None:
new_len += len(self._fragmented_message_buffer)
if new_len > self.params.max_message_size:
self.close(1009, "message too big")
self._abort()
return
# Read the payload, unmasking if necessary.
if is_masked:
self._frame_mask = await self._read_bytes(4)
data = await self._read_bytes(payloadlen)
if is_masked:
assert self._frame_mask is not None
data = _websocket_mask(self._frame_mask, data)
# Decide what to do with this frame.
if opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not is_final_frame:
# control frames must not be fragmented
self._abort()
return
elif opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if is_final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if not is_final_frame:
self._fragmented_message_opcode = opcode
self._fragmented_message_buffer = data
if is_final_frame:
handled_future = self._handle_message(opcode, data)
if handled_future is not None:
await handled_future
def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]":
"""Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated:
return None
if self._frame_compressed:
assert self._decompressor is not None
try:
data = self._decompressor.decompress(data)
except _DecompressTooLargeError:
self.close(1009, "message too big after decompression")
self._abort()
return None
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return None
return self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
return self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.close_code = struct.unpack(">H", data[:2])[0]
if len(data) > 2:
self.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.close_code)
elif opcode == 0x9:
# Ping
try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
self.last_pong = IOLoop.current().time()
return self._run_callback(self.handler.on_pong, data)
else:
self._abort()
return None
def close(self, code: int = None, reason: str = None) -> None:
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b""
else:
close_data = struct.pack(">H", code)
if reason is not None:
close_data += utf8(reason)
try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort
)
def is_closing(self) -> bool:
"""Return ``True`` if this connection is closing.
The connection is considered closing if either side has
initiated its closing handshake or if the stream has been
shut down uncleanly.
"""
return self.stream.closed() or self.client_terminated or self.server_terminated
@property
def ping_interval(self) -> Optional[float]:
interval = self.params.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self) -> Optional[float]:
timeout = self.params.ping_timeout
if timeout is not None:
return timeout
assert self.ping_interval is not None
return max(3 * self.ping_interval, 30)
def start_pinging(self) -> None:
"""Start sending periodic pings to keep the connection alive"""
assert self.ping_interval is not None
if self.ping_interval > 0:
self.last_ping = self.last_pong = IOLoop.current().time()
self.ping_callback = PeriodicCallback(
self.periodic_ping, self.ping_interval * 1000
)
self.ping_callback.start()
def periodic_ping(self) -> None:
"""Send a ping to keep the websocket alive
Called periodically if the websocket_ping_interval is set and non-zero.
"""
if self.is_closing() and self.ping_callback is not None:
self.ping_callback.stop()
return
# Check for timeout on pong. Make sure that we really have
# sent a recent ping in case the machine with both server and
# client has been suspended since the last ping.
now = IOLoop.current().time()
since_last_pong = now - self.last_pong
since_last_ping = now - self.last_ping
assert self.ping_interval is not None
assert self.ping_timeout is not None
if (
since_last_ping < 2 * self.ping_interval
and since_last_pong > self.ping_timeout
):
self.close()
return
self.write_ping(b"")
self.last_ping = now
def set_nodelay(self, x: bool) -> None:
self.stream.set_nodelay(x)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
protocol = None # type: WebSocketProtocol
def __init__(
self,
request: httpclient.HTTPRequest,
on_message_callback: Callable[[Union[None, str, bytes]], None] = None,
compression_options: Dict[str, Any] = None,
ping_interval: float = None,
ping_timeout: float = None,
max_message_size: int = _default_max_message_size,
subprotocols: Optional[List[str]] = [],
) -> None:
self.connect_future = Future() # type: Future[WebSocketClientConnection]
self.read_queue = Queue(1) # type: Queue[Union[None, str, bytes]]
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.params = _WebSocketParams(
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
compression_options=compression_options,
)
scheme, sep, rest = request.url.partition(":")
scheme = {"ws": "http", "wss": "https"}[scheme]
request.url = scheme + sep + rest
request.headers.update(
{
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-WebSocket-Key": self.key,
"Sec-WebSocket-Version": "13",
}
)
if subprotocols is not None:
request.headers["Sec-WebSocket-Protocol"] = ",".join(subprotocols)
if compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers[
"Sec-WebSocket-Extensions"
] = "permessage-deflate; client_max_window_bits"
self.tcp_client = TCPClient()
super(WebSocketClientConnection, self).__init__(
None,
request,
lambda: None,
self._on_http_response,
104857600,
self.tcp_client,
65536,
104857600,
)
def close(self, code: int = None, reason: str = None) -> None:
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None # type: ignore
def on_connection_close(self) -> None:
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self._on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def on_ws_connection_close(
self, close_code: int = None, close_reason: str = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _on_http_response(self, response: httpclient.HTTPResponse) -> None:
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(
WebSocketError("Non-websocket response")
)
async def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> None:
assert isinstance(start_line, httputil.ResponseStartLine)
if start_line.code != 101:
await super(WebSocketClientConnection, self).headers_received(
start_line, headers
)
return
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol.stream = self.connection.detach()
IOLoop.current().add_callback(self.protocol._receive_frame_loop)
self.protocol.start_pinging()
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None # type: ignore
future_set_result_unless_cancelled(self.connect_future, self)
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
"""Sends a message to the WebSocket server.
If the stream is closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
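        A usage sketch (``conn`` is assumed to be an established connection
        returned by `websocket_connect`)::
            await conn.write_message(u"hello")
            await conn.write_message(b"\x00\x01", binary=True)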
.. versionchanged:: 5.0
Exception raised on a closed stream changed from `.StreamClosedError`
to `WebSocketClosedError`.
"""
return self.protocol.write_message(message, binary=binary)
def read_message(
self, callback: Callable[["Future[Union[None, str, bytes]]"], None] = None
) -> Awaitable[Union[None, str, bytes]]:
"""Reads a message from the WebSocket server.
        If on_message_callback was specified at WebSocket
        initialization, this function will never return messages.
        Returns a future whose result is the message, or None
        if the connection is closed. If a callback argument
        is given, it will be called with the future when it is
        ready.
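        A typical read loop looks like this (``conn`` assumed to be an
        established connection)::
            while True:
                msg = await conn.read_message()
                if msg is None:
                    break  # connection was closed
                # process msg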
"""
awaitable = self.read_queue.get()
if callback is not None:
self.io_loop.add_future(asyncio.ensure_future(awaitable), callback)
return awaitable
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
return self._on_message(message)
def _on_message(
self, message: Union[None, str, bytes]
) -> Optional[Awaitable[None]]:
if self._on_message_callback:
self._on_message_callback(message)
return None
else:
return self.read_queue.put(message)
def ping(self, data: bytes = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``ping_interval`` argument to
`websocket_connect` instead of sending pings manually.
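        A usage sketch (``conn`` assumed to be an established connection)::
            conn.ping(b"keepalive")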
.. versionadded:: 5.1
"""
data = utf8(data)
if self.protocol is None:
raise WebSocketClosedError()
self.protocol.write_ping(data)
def on_pong(self, data: bytes) -> None:
pass
def on_ping(self, data: bytes) -> None:
pass
def get_websocket_protocol(self) -> WebSocketProtocol:
return WebSocketProtocol13(self, mask_outgoing=True, params=self.params)
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol selected by the server.
.. versionadded:: 5.1
"""
return self.protocol.selected_subprotocol
def log_exception(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
assert typ is not None
assert value is not None
app_log.error("Uncaught exception %s", value, exc_info=(typ, value, tb))
def websocket_connect(
url: Union[str, httpclient.HTTPRequest],
callback: Callable[["Future[WebSocketClientConnection]"], None] = None,
connect_timeout: float = None,
on_message_callback: Callable[[Union[None, str, bytes]], None] = None,
compression_options: Dict[str, Any] = None,
ping_interval: float = None,
ping_timeout: float = None,
max_message_size: int = _default_max_message_size,
subprotocols: List[str] = None,
) -> "Awaitable[WebSocketClientConnection]":
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
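    With native coroutines, an equivalent sketch uses ``await``::
        async def main():
            conn = await websocket_connect(url)
            while True:
                msg = await conn.read_message()
                if msg is None:
                    break
                # Do something with msg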
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
``subprotocols`` may be a list of strings specifying proposed
subprotocols. The selected protocol may be found on the
``selected_subprotocol`` attribute of the connection object
when the connection is complete.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.1
Added the ``subprotocols`` argument.
"""
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = cast(
httpclient.HTTPRequest,
httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS),
)
conn = WebSocketClientConnection(
request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
subprotocols=subprotocols,
)
if callback is not None:
IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future
| BugsInPy/BugsInPy/temp/projects/tornado/bug-1-fixed/tornado/tornado/websocket.py,BugsInPy/BugsInPy/temp/projects/tornado/bug-1-buggy/tornado/tornado/websocket.py |