code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
from __future__ import annotations from datetime import * from time import * from typing import * from pydantic import * class WantAction(BaseModel): """The act of expressing a desire about the object. An agent wants an object. References: https://schema.org/WantAction Note: Model Depth 5 Attributes: potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. 
See [background notes](/docs/datamodel.html#identifierBg) for more details. image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. endTime: (Optional[Union[List[Union[datetime, str, Any]], datetime, str, Any]]): The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. E.g. John wrote a book from January to *December*. For media, including audio and video, it's the time offset of the end of a clip within a larger file.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions. provider: (Optional[Union[List[Union[str, Any]], str, Any]]): The service provider, service operator, or service performer; the goods producer. Another party (a seller) may offer those services or goods on behalf of the provider. A provider may also serve as the seller. startTime: (Optional[Union[List[Union[datetime, str, Any]], datetime, str, Any]]): The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. E.g. 
John wrote a book from *January* to December. For media, including audio and video, it's the time offset of the start of a clip within a larger file.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions. result: (Optional[Union[List[Union[str, Any]], str, Any]]): The result produced in the action. E.g. John wrote *a book*. actionStatus: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates the current disposition of the Action. agent: (Optional[Union[List[Union[str, Any]], str, Any]]): The direct performer or driver of the action (animate or inanimate). E.g. *John* wrote a book. instrument: (Optional[Union[List[Union[str, Any]], str, Any]]): The object that helped the agent perform the action. E.g. John wrote a book with *a pen*. object: (Optional[Union[List[Union[str, Any]], str, Any]]): The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). E.g. John read *a book*. error: (Optional[Union[List[Union[str, Any]], str, Any]]): For failed actions, more information on the cause of the failure. target: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a target EntryPoint, or url, for an Action. location: (Union[List[Union[str, Any]], str, Any]): The location of, for example, where an event is happening, where an organization is located, or where an action takes place. participant: (Optional[Union[List[Union[str, Any]], str, Any]]): Other co-agents that participated in the action indirectly. E.g. John wrote a book with *Steve*. 
""" type_: str = Field(default="WantAction", alias="@type", const=True) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. 
Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) endTime: Optional[ Union[List[Union[datetime, str, Any]], datetime, str, Any] ] = Field( default=None, description="The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation)," "the time that it is expected to end. For actions that span a period of time, when the action" "was performed. E.g. John wrote a book from January to *December*. For media, including" "audio and video, it's the time offset of the end of a clip within a larger file.Note that" "Event uses startDate/endDate instead of startTime/endTime, even when describing" "dates with times. This situation may be clarified in future revisions.", ) provider: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The service provider, service operator, or service performer; the goods producer." "Another party (a seller) may offer those services or goods on behalf of the provider." 
"A provider may also serve as the seller.", ) startTime: Optional[ Union[List[Union[datetime, str, Any]], datetime, str, Any] ] = Field( default=None, description="The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation)," "the time that it is expected to start. For actions that span a period of time, when the action" "was performed. E.g. John wrote a book from *January* to December. For media, including" "audio and video, it's the time offset of the start of a clip within a larger file.Note that" "Event uses startDate/endDate instead of startTime/endTime, even when describing" "dates with times. This situation may be clarified in future revisions.", ) result: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The result produced in the action. E.g. John wrote *a book*.", ) actionStatus: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates the current disposition of the Action.", ) agent: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The direct performer or driver of the action (animate or inanimate). E.g. *John* wrote" "a book.", ) instrument: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The object that helped the agent perform the action. E.g. John wrote a book with *a pen*.", ) object: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The object upon which the action is carried out, whose state is kept intact or changed." "Also known as the semantic roles patient, affected or undergoer (which change their" "state) or theme (which doesn't). E.g. 
John read *a book*.", ) error: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="For failed actions, more information on the cause of the failure.", ) target: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="Indicates a target EntryPoint, or url, for an Action.", ) location: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The location of, for example, where an event is happening, where an organization is located," "or where an action takes place.", ) participant: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Other co-agents that participated in the action indirectly. E.g. John wrote a book with" "*Steve*.", )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/WantAction.py
0.934178
0.411702
WantAction.py
pypi
from __future__ import annotations from datetime import * from time import * from typing import * from pydantic import * class MoneyTransfer(BaseModel): """The act of transferring money from one place to another place. This may occur electronically or physically. References: https://schema.org/MoneyTransfer Note: Model Depth 4 Attributes: potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. 
See [background notes](/docs/datamodel.html#identifierBg) for more details. image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. endTime: (Optional[Union[List[Union[datetime, str, Any]], datetime, str, Any]]): The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. E.g. John wrote a book from January to *December*. For media, including audio and video, it's the time offset of the end of a clip within a larger file.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions. provider: (Optional[Union[List[Union[str, Any]], str, Any]]): The service provider, service operator, or service performer; the goods producer. Another party (a seller) may offer those services or goods on behalf of the provider. A provider may also serve as the seller. startTime: (Optional[Union[List[Union[datetime, str, Any]], datetime, str, Any]]): The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. E.g. 
John wrote a book from *January* to December. For media, including audio and video, it's the time offset of the start of a clip within a larger file.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions. result: (Optional[Union[List[Union[str, Any]], str, Any]]): The result produced in the action. E.g. John wrote *a book*. actionStatus: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates the current disposition of the Action. agent: (Optional[Union[List[Union[str, Any]], str, Any]]): The direct performer or driver of the action (animate or inanimate). E.g. *John* wrote a book. instrument: (Optional[Union[List[Union[str, Any]], str, Any]]): The object that helped the agent perform the action. E.g. John wrote a book with *a pen*. object: (Optional[Union[List[Union[str, Any]], str, Any]]): The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). E.g. John read *a book*. error: (Optional[Union[List[Union[str, Any]], str, Any]]): For failed actions, more information on the cause of the failure. target: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a target EntryPoint, or url, for an Action. location: (Union[List[Union[str, Any]], str, Any]): The location of, for example, where an event is happening, where an organization is located, or where an action takes place. participant: (Optional[Union[List[Union[str, Any]], str, Any]]): Other co-agents that participated in the action indirectly. E.g. John wrote a book with *Steve*. toLocation: (Optional[Union[List[Union[str, Any]], str, Any]]): A sub property of location. The final location of the object or the agent after the action. fromLocation: (Optional[Union[List[Union[str, Any]], str, Any]]): A sub property of location. 
The original location of the object or the agent before the action. amount: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): The amount of money. beneficiaryBank: (Union[List[Union[str, Any]], str, Any]): A bank or bank’s branch, financial institution or international financial institution operating the beneficiary’s bank account or releasing funds for the beneficiary. """ type_: str = Field(default="MoneyTransfer", alias="@type", const=True) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. 
A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) endTime: Optional[ Union[List[Union[datetime, str, Any]], datetime, str, Any] ] = Field( default=None, description="The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation)," "the time that it is expected to end. For actions that span a period of time, when the action" "was performed. E.g. John wrote a book from January to *December*. 
For media, including" "audio and video, it's the time offset of the end of a clip within a larger file.Note that" "Event uses startDate/endDate instead of startTime/endTime, even when describing" "dates with times. This situation may be clarified in future revisions.", ) provider: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The service provider, service operator, or service performer; the goods producer." "Another party (a seller) may offer those services or goods on behalf of the provider." "A provider may also serve as the seller.", ) startTime: Optional[ Union[List[Union[datetime, str, Any]], datetime, str, Any] ] = Field( default=None, description="The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation)," "the time that it is expected to start. For actions that span a period of time, when the action" "was performed. E.g. John wrote a book from *January* to December. For media, including" "audio and video, it's the time offset of the start of a clip within a larger file.Note that" "Event uses startDate/endDate instead of startTime/endTime, even when describing" "dates with times. This situation may be clarified in future revisions.", ) result: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The result produced in the action. E.g. John wrote *a book*.", ) actionStatus: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates the current disposition of the Action.", ) agent: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The direct performer or driver of the action (animate or inanimate). E.g. *John* wrote" "a book.", ) instrument: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The object that helped the agent perform the action. E.g. 
John wrote a book with *a pen*.", ) object: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The object upon which the action is carried out, whose state is kept intact or changed." "Also known as the semantic roles patient, affected or undergoer (which change their" "state) or theme (which doesn't). E.g. John read *a book*.", ) error: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="For failed actions, more information on the cause of the failure.", ) target: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="Indicates a target EntryPoint, or url, for an Action.", ) location: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The location of, for example, where an event is happening, where an organization is located," "or where an action takes place.", ) participant: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Other co-agents that participated in the action indirectly. E.g. John wrote a book with" "*Steve*.", ) toLocation: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A sub property of location. The final location of the object or the agent after the action.", ) fromLocation: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A sub property of location. The original location of the object or the agent before the" "action.", ) amount: Optional[ Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat, ] ] = Field( default=None, description="The amount of money.", ) beneficiaryBank: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A bank or bank’s branch, financial institution or international financial institution" "operating the beneficiary’s bank account or releasing funds for the beneficiary.", )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/MoneyTransfer.py
0.909546
0.379263
MoneyTransfer.py
pypi
from __future__ import annotations from datetime import * from time import * from typing import * from pydantic import * class RestockingFees(BaseModel): """Specifies that the customer must pay a restocking fee when returning a product. References: https://schema.org/RestockingFees Note: Model Depth 5 Attributes: potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. 
See [background notes](/docs/datamodel.html#identifierBg) for more details. image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. supersededBy: (Optional[Union[List[Union[str, Any]], str, Any]]): Relates a term (i.e. a property, class or enumeration) to one that supersedes it. """ type_: str = Field(default="RestockingFees", alias="@type", const=True) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. 
See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. 
This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) supersededBy: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Relates a term (i.e. a property, class or enumeration) to one that supersedes it.", )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/RestockingFees.py
0.948597
0.333368
RestockingFees.py
pypi
from __future__ import annotations from datetime import * from time import * from typing import * from pydantic import * class VenueMap(BaseModel): """A venue map (e.g. for malls, auditoriums, museums, etc.). References: https://schema.org/VenueMap Note: Model Depth 5 Attributes: potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. 
See [background notes](/docs/datamodel.html#identifierBg) for more details. image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. supersededBy: (Optional[Union[List[Union[str, Any]], str, Any]]): Relates a term (i.e. a property, class or enumeration) to one that supersedes it. """ type_: str = Field(default="VenueMap", alias="@type", const=True) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. 
See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. 
This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) supersededBy: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Relates a term (i.e. a property, class or enumeration) to one that supersedes it.", )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/VenueMap.py
0.932376
0.316448
VenueMap.py
pypi
from __future__ import annotations from datetime import * from time import * from typing import * from pydantic import * class BeautySalon(BaseModel): """Beauty salon. References: https://schema.org/BeautySalon Note: Model Depth 5 Attributes: potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details. 
image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. serviceArea: (Optional[Union[List[Union[str, Any]], str, Any]]): The geographic area where the service is provided. founder: (Optional[Union[List[Union[str, Any]], str, Any]]): A person who founded this organization. isicV4: (Union[List[Union[str, Any]], str, Any]): The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place. hasPOS: (Optional[Union[List[Union[str, Any]], str, Any]]): Points-of-Sales operated by the organization or person. globalLocationNumber: (Union[List[Union[str, Any]], str, Any]): The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations. member: (Optional[Union[List[Union[str, Any]], str, Any]]): A member of an Organization or a ProgramMembership. Organizations can be members of organizations; ProgramMembership is typically for individuals. 
knowsAbout: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Of a [[Person]], and less typically of an [[Organization]], to indicate a topic that is known about - suggesting possible expertise but not implying it. We do not distinguish skill levels here, or relate this to educational content, events, objectives or [[JobPosting]] descriptions. makesOffer: (Optional[Union[List[Union[str, Any]], str, Any]]): A pointer to products or services offered by the organization or person. ownershipFundingInfo: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]]), a description of organizational ownership structure; funding and grants. In a news/media setting, this is with particular reference to editorial independence. Note that the [[funder]] is also available and can be used to make basic funder information machine-readable. founders: (Optional[Union[List[Union[str, Any]], str, Any]]): A person who founded this organization. legalName: (Union[List[Union[str, Any]], str, Any]): The official name of the organization, e.g. the registered company name. actionableFeedbackPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For a [[NewsMediaOrganization]] or other news-related [[Organization]], a statement about public engagement activities (for news media, the newsroom’s), including involving the public - digitally or otherwise -- in coverage decisions, reporting and activities after publication. areaServed: (Union[List[Union[str, Any]], str, Any]): The geographic area where a service or offered item is provided. parentOrganization: (Optional[Union[List[Union[str, Any]], str, Any]]): The larger organization that this organization is a [[subOrganization]] of, if any. slogan: (Union[List[Union[str, Any]], str, Any]): A slogan or motto associated with the item. 
department: (Optional[Union[List[Union[str, Any]], str, Any]]): A relationship between an organization and a department of that organization, also described as an organization (allowing different urls, logos, opening hours). For example: a store with a pharmacy, or a bakery with a cafe. keywords: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Keywords or tags used to describe some item. Multiple textual entries in a keywords list are typically delimited by commas, or by repeating the property. reviews: (Optional[Union[List[Union[str, Any]], str, Any]]): Review of the item. memberOf: (Optional[Union[List[Union[str, Any]], str, Any]]): An Organization (or ProgramMembership) to which this Person or Organization belongs. publishingPrinciples: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): The publishingPrinciples property indicates (typically via [[URL]]) a document describing the editorial principles of an [[Organization]] (or individual, e.g. a [[Person]] writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles are those of the party primarily responsible for the creation of the [[CreativeWork]].While such policies are most typically expressed in natural language, sometimes related information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology. employee: (Optional[Union[List[Union[str, Any]], str, Any]]): Someone working for this organization. award: (Union[List[Union[str, Any]], str, Any]): An award won by or for this item. email: (Union[List[Union[str, Any]], str, Any]): Email address. contactPoints: (Optional[Union[List[Union[str, Any]], str, Any]]): A contact point for a person or organization. 
diversityStaffingReport: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]]), a report on staffing diversity issues. In a news context this might be for example ASNE or RTDNA (US) reports, or self-reported. foundingDate: (Optional[Union[List[Union[str, Any, date]], str, Any, date]]): The date that this organization was founded. owns: (Optional[Union[List[Union[str, Any]], str, Any]]): Products owned by the organization or person. awards: (Union[List[Union[str, Any]], str, Any]): Awards won by or for this item. review: (Optional[Union[List[Union[str, Any]], str, Any]]): A review of the item. dissolutionDate: (Optional[Union[List[Union[str, Any, date]], str, Any, date]]): The date that this organization was dissolved. funding: (Optional[Union[List[Union[str, Any]], str, Any]]): A [[Grant]] that directly or indirectly provide funding or sponsorship for this item. See also [[ownershipFundingInfo]]. interactionStatistic: (Optional[Union[List[Union[str, Any]], str, Any]]): The number of interactions for the CreativeWork using the WebSite or SoftwareApplication. The most specific child type of InteractionCounter should be used. events: (Optional[Union[List[Union[str, Any]], str, Any]]): Upcoming or past events associated with this place or organization. seeks: (Optional[Union[List[Union[str, Any]], str, Any]]): A pointer to products or services sought by the organization or person (demand). employees: (Optional[Union[List[Union[str, Any]], str, Any]]): People working for this organization. unnamedSourcesPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For an [[Organization]] (typically a [[NewsMediaOrganization]]), a statement about policy on use of unnamed sources and the decision process required. 
subOrganization: (Optional[Union[List[Union[str, Any]], str, Any]]): A relationship between two organizations where the first includes the second, e.g., as a subsidiary. See also: the more specific 'department' property. foundingLocation: (Optional[Union[List[Union[str, Any]], str, Any]]): The place where the Organization was founded. funder: (Optional[Union[List[Union[str, Any]], str, Any]]): A person or organization that supports (sponsors) something through some kind of financial contribution. iso6523Code: (Union[List[Union[str, Any]], str, Any]): An organization identifier as defined in ISO 6523(-1). Note that many existing organization identifiers such as [leiCode](https://schema.org/leiCode), [duns](https://schema.org/duns) and [vatID](https://schema.org/vatID) can be expressed as an ISO 6523 identifier by setting the ICD part of the ISO 6523 identifier accordingly. diversityPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Statement on diversity policy by an [[Organization]] e.g. a [[NewsMediaOrganization]]. For a [[NewsMediaOrganization]], a statement describing the newsroom’s diversity policy on both staffing and sources, typically providing staffing data. hasMerchantReturnPolicy: (Optional[Union[List[Union[str, Any]], str, Any]]): Specifies a MerchantReturnPolicy that may be applicable. event: (Optional[Union[List[Union[str, Any]], str, Any]]): Upcoming or past event associated with this place, organization, or action. duns: (Union[List[Union[str, Any]], str, Any]): The Dun & Bradstreet DUNS number for identifying an organization or business person. alumni: (Optional[Union[List[Union[str, Any]], str, Any]]): Alumni of an organization. ethicsPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Statement about ethics policy, e.g. of a [[NewsMediaOrganization]] regarding journalistic and publishing practices, or of a [[Restaurant]], a page describing food source policies. 
In the case of a [[NewsMediaOrganization]], an ethicsPolicy is typically a statement describing the personal, organizational, and corporate standards of behavior expected by the organization. leiCode: (Union[List[Union[str, Any]], str, Any]): An organization identifier that uniquely identifies a legal entity as defined in ISO 17442. vatID: (Union[List[Union[str, Any]], str, Any]): The Value-added Tax ID of the organization or person. knowsLanguage: (Union[List[Union[str, Any]], str, Any]): Of a [[Person]], and less typically of an [[Organization]], to indicate a known language. We do not distinguish skill levels or reading/writing/speaking/signing here. Use language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47). correctionsPolicy: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): For an [[Organization]] (e.g. [[NewsMediaOrganization]]), a statement describing (in news media, the newsroom’s) disclosure and correction policy for errors. logo: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An associated logo. hasCredential: (Optional[Union[List[Union[str, Any]], str, Any]]): A credential awarded to the Person or Organization. address: (Union[List[Union[str, Any]], str, Any]): Physical address of the item. brand: (Optional[Union[List[Union[str, Any]], str, Any]]): The brand(s) associated with a product or service, or the brand(s) maintained by an organization or business person. nonprofitStatus: (Optional[Union[List[Union[str, Any]], str, Any]]): nonprofitStatus indicates the legal status of a non-profit organization in its primary place of business. contactPoint: (Optional[Union[List[Union[str, Any]], str, Any]]): A contact point for a person or organization. hasOfferCatalog: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates an OfferCatalog listing for this Organization, Person, or Service. members: (Optional[Union[List[Union[str, Any]], str, Any]]): A member of this organization. 
aggregateRating: (Optional[Union[List[Union[str, Any]], str, Any]]): The overall rating, based on a collection of reviews or ratings, of the item. faxNumber: (Union[List[Union[str, Any]], str, Any]): The fax number. telephone: (Union[List[Union[str, Any]], str, Any]): The telephone number. taxID: (Union[List[Union[str, Any]], str, Any]): The Tax / Fiscal ID of the organization or person, e.g. the TIN in the US or the CIF/NIF in Spain. naics: (Union[List[Union[str, Any]], str, Any]): The North American Industry Classification System (NAICS) code for a particular organization or business person. location: (Union[List[Union[str, Any]], str, Any]): The location of, for example, where an event is happening, where an organization is located, or where an action takes place. numberOfEmployees: (Optional[Union[List[Union[str, Any]], str, Any]]): The number of employees in an organization, e.g. business. sponsor: (Optional[Union[List[Union[str, Any]], str, Any]]): A person or organization that supports a thing through a pledge, promise, or financial contribution. E.g. a sponsor of a Medical Study or a corporate sponsor of an event. potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. 
sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details. image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. geoCovers: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents a relationship between two geometries (or the places they represent), relating a covering geometry to a covered geometry. 
"Every point of b is a point of (the interior or boundary of) a". As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). longitude: (Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]): The longitude of a location. For example ```-122.08585``` ([WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System)). smokingAllowed: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room. isicV4: (Union[List[Union[str, Any]], str, Any]): The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place. globalLocationNumber: (Union[List[Union[str, Any]], str, Any]): The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations. amenityFeature: (Optional[Union[List[Union[str, Any]], str, Any]]): An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs. additionalProperty: (Optional[Union[List[Union[str, Any]], str, Any]]): A property-value pair representing an additional characteristic of the entity, e.g. a product feature or another characteristic for which there is no matching property in schema.org.Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. https://schema.org/width, https://schema.org/color, https://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism. 
slogan: (Union[List[Union[str, Any]], str, Any]): A slogan or motto associated with the item. photos: (Optional[Union[List[Union[str, Any]], str, Any]]): Photographs of this place. keywords: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Keywords or tags used to describe some item. Multiple textual entries in a keywords list are typically delimited by commas, or by repeating the property. reviews: (Optional[Union[List[Union[str, Any]], str, Any]]): Review of the item. tourBookingPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A page providing information on how to book a tour of some [[Place]], such as an [[Accommodation]] or [[ApartmentComplex]] in a real estate setting, as well as other kinds of tours as appropriate. geoWithin: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents a relationship between two geometries (or the places they represent), relating a geometry to one that contains it, i.e. it is inside (i.e. within) its interior. As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). containsPlace: (Optional[Union[List[Union[str, Any]], str, Any]]): The basic containment relation between a place and another that it contains. review: (Optional[Union[List[Union[str, Any]], str, Any]]): A review of the item. hasMap: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A URL to a map of the place. containedIn: (Optional[Union[List[Union[str, Any]], str, Any]]): The basic containment relation between a place and one that contains it. events: (Optional[Union[List[Union[str, Any]], str, Any]]): Upcoming or past events associated with this place or organization. geoOverlaps: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents a relationship between two geometries (or the places they represent), relating a geometry to another that geospatially overlaps it, i.e. they have some but not all points in common. As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). 
geoEquals: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents spatial relations in which two geometries (or the places they represent) are topologically equal, as defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). "Two geometries are topologically equal if their interiors intersect and no part of the interior or boundary of one geometry intersects the exterior of the other" (a symmetric relationship). maps: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A URL to a map of the place. isAccessibleForFree: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): A flag to signal that the item, event, or place is accessible for free. event: (Optional[Union[List[Union[str, Any]], str, Any]]): Upcoming or past event associated with this place, organization, or action. photo: (Optional[Union[List[Union[str, Any]], str, Any]]): A photograph of this place. containedInPlace: (Optional[Union[List[Union[str, Any]], str, Any]]): The basic containment relation between a place and one that contains it. logo: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An associated logo. geoCrosses: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents a relationship between two geometries (or the places they represent), relating a geometry to another that crosses it: "a crosses b: they have some but not all interior points in common, and the dimension of the intersection is less than that of at least one of them". As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). address: (Union[List[Union[str, Any]], str, Any]): Physical address of the item. geo: (Optional[Union[List[Union[str, Any]], str, Any]]): The geo coordinates of the place. openingHoursSpecification: (Optional[Union[List[Union[str, Any]], str, Any]]): The opening hours of a certain place. 
geoDisjoint: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents spatial relations in which two geometries (or the places they represent) are topologically disjoint: "they have no point in common. They form a set of disconnected geometries." (A symmetric relationship, as defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).) geoIntersects: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents spatial relations in which two geometries (or the places they represent) have at least one point in common. As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). latitude: (Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]): The latitude of a location. For example ```37.42242``` ([WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System)). maximumAttendeeCapacity: (Optional[Union[List[Union[str, int, Any]], str, int, Any]]): The total number of individuals that may attend an event or venue. aggregateRating: (Optional[Union[List[Union[str, Any]], str, Any]]): The overall rating, based on a collection of reviews or ratings, of the item. map: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A URL to a map of the place. branchCode: (Union[List[Union[str, Any]], str, Any]): A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch. faxNumber: (Union[List[Union[str, Any]], str, Any]): The fax number. publicAccess: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): A flag to signal that the [[Place]] is open to public visitors. 
If this property is omitted there is no assumed default boolean value geoTouches: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents spatial relations in which two geometries (or the places they represent) touch: "they have at least one boundary point in common, but no interior points." (A symmetric relationship, as defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).) geoCoveredBy: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents a relationship between two geometries (or the places they represent), relating a geometry to another that covers it. As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). telephone: (Union[List[Union[str, Any]], str, Any]): The telephone number. hasDriveThroughService: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): Indicates whether some facility (e.g. [[FoodEstablishment]], [[CovidTestingFacility]]) offers a service that can be used by driving through in a car. In the case of [[CovidTestingFacility]] such facilities could potentially help with social distancing from other potentially-infected users. specialOpeningHoursSpecification: (Optional[Union[List[Union[str, Any]], str, Any]]): The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]]. geoContains: (Optional[Union[List[Union[str, Any]], str, Any]]): Represents a relationship between two geometries (or the places they represent), relating a containing geometry to a contained geometry. "a contains b iff no points of b lie in the exterior of a, and at least one point of the interior of b lies in the interior of a". As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM). priceRange: (Union[List[Union[str, Any]], str, Any]): The price range of the business, for example ```$$$```. 
currenciesAccepted: (Union[List[Union[str, Any]], str, Any]): The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217), e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies, e.g. "BTC"; well known names for [Local Exchange Trading Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types, e.g. "Ithaca HOUR". branchOf: (Optional[Union[List[Union[str, Any]], str, Any]]): The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical) [[branch]]. paymentAccepted: (Union[List[Union[str, Any]], str, Any]): Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc. openingHours: (Union[List[Union[str, Any]], str, Any]): The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 format. For example, 3pm is specified as ```15:00```, 10am as ```10:00```. * Here is an example: <code>&lt;time itemprop="openingHours" datetime=&quot;Tu,Th 16:00-20:00&quot;&gt;Tuesdays and Thursdays 4-8pm&lt;/time&gt;</code>.* If a business is open 7 days a week, then it can be specified as <code>&lt;time itemprop=&quot;openingHours&quot; datetime=&quot;Mo-Su&quot;&gt;Monday through Sunday, all day&lt;/time&gt;</code>. 
""" type_: str = Field(default="BeautySalon", alias="@type", const=True) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. 
Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) serviceArea: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The geographic area where the service is provided.", ) founder: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A person who founded this organization.", ) isicV4: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The International Standard of Industrial Classification of All Economic Activities" "(ISIC), Revision 4 code for a particular organization, business person, or place.", ) hasPOS: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Points-of-Sales operated by the organization or person.", ) globalLocationNumber: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred" "to as International Location Number or ILN) of the respective 
organization, person," "or place. The GLN is a 13-digit number used to identify parties and physical locations.", ) member: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A member of an Organization or a ProgramMembership. Organizations can be members of" "organizations; ProgramMembership is typically for individuals.", ) knowsAbout: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Of a [[Person]], and less typically of an [[Organization]], to indicate a topic that" "is known about - suggesting possible expertise but not implying it. We do not distinguish" "skill levels here, or relate this to educational content, events, objectives or [[JobPosting]]" "descriptions.", ) makesOffer: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A pointer to products or services offered by the organization or person.", ) ownershipFundingInfo: Union[ List[Union[str, AnyUrl, Any]], str, AnyUrl, Any ] = Field( default=None, description="For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]])," "a description of organizational ownership structure; funding and grants. In a news/media" "setting, this is with particular reference to editorial independence. Note that the" "[[funder]] is also available and can be used to make basic funder information machine-readable.", ) founders: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A person who founded this organization.", ) legalName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The official name of the organization, e.g. 
the registered company name.", ) actionableFeedbackPolicy: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="For a [[NewsMediaOrganization]] or other news-related [[Organization]], a statement" "about public engagement activities (for news media, the newsroom’s), including involving" "the public - digitally or otherwise -- in coverage decisions, reporting and activities" "after publication.", ) areaServed: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The geographic area where a service or offered item is provided.", ) parentOrganization: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The larger organization that this organization is a [[subOrganization]] of, if any.", ) slogan: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A slogan or motto associated with the item.", ) department: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A relationship between an organization and a department of that organization, also" "described as an organization (allowing different urls, logos, opening hours). For" "example: a store with a pharmacy, or a bakery with a cafe.", ) keywords: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Keywords or tags used to describe some item. 
Multiple textual entries in a keywords list" "are typically delimited by commas, or by repeating the property.", ) reviews: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Review of the item.", ) memberOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An Organization (or ProgramMembership) to which this Person or Organization belongs.", ) publishingPrinciples: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="The publishingPrinciples property indicates (typically via [[URL]]) a document describing" "the editorial principles of an [[Organization]] (or individual, e.g. a [[Person]]" "writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity" "policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles" "are those of the party primarily responsible for the creation of the [[CreativeWork]].While" "such policies are most typically expressed in natural language, sometimes related" "information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology.", ) employee: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Someone working for this organization.", ) award: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An award won by or for this item.", ) email: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Email address.", ) contactPoints: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A contact point for a person or organization.", ) diversityStaffingReport: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="For an [[Organization]] (often but not necessarily a [[NewsMediaOrganization]])," "a report on staffing diversity issues. 
In a news context this might be for example ASNE" "or RTDNA (US) reports, or self-reported.", ) foundingDate: Optional[Union[List[Union[str, Any, date]], str, Any, date]] = Field( default=None, description="The date that this organization was founded.", ) owns: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Products owned by the organization or person.", ) awards: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Awards won by or for this item.", ) review: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A review of the item.", ) dissolutionDate: Optional[ Union[List[Union[str, Any, date]], str, Any, date] ] = Field( default=None, description="The date that this organization was dissolved.", ) funding: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A [[Grant]] that directly or indirectly provide funding or sponsorship for this item." "See also [[ownershipFundingInfo]].", ) interactionStatistic: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The number of interactions for the CreativeWork using the WebSite or SoftwareApplication." 
"The most specific child type of InteractionCounter should be used.", ) events: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Upcoming or past events associated with this place or organization.", ) seeks: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A pointer to products or services sought by the organization or person (demand).", ) employees: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="People working for this organization.", ) unnamedSourcesPolicy: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="For an [[Organization]] (typically a [[NewsMediaOrganization]]), a statement about" "policy on use of unnamed sources and the decision process required.", ) subOrganization: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A relationship between two organizations where the first includes the second, e.g.," "as a subsidiary. See also: the more specific 'department' property.", ) foundingLocation: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The place where the Organization was founded.", ) funder: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A person or organization that supports (sponsors) something through some kind of financial" "contribution.", ) iso6523Code: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An organization identifier as defined in ISO 6523(-1). 
Note that many existing organization" "identifiers such as [leiCode](https://schema.org/leiCode), [duns](https://schema.org/duns)" "and [vatID](https://schema.org/vatID) can be expressed as an ISO 6523 identifier" "by setting the ICD part of the ISO 6523 identifier accordingly.", ) diversityPolicy: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Statement on diversity policy by an [[Organization]] e.g. a [[NewsMediaOrganization]]." "For a [[NewsMediaOrganization]], a statement describing the newsroom’s diversity" "policy on both staffing and sources, typically providing staffing data.", ) hasMerchantReturnPolicy: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Specifies a MerchantReturnPolicy that may be applicable.", ) event: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Upcoming or past event associated with this place, organization, or action.", ) duns: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The Dun & Bradstreet DUNS number for identifying an organization or business person.", ) alumni: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Alumni of an organization.", ) ethicsPolicy: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Statement about ethics policy, e.g. of a [[NewsMediaOrganization]] regarding journalistic" "and publishing practices, or of a [[Restaurant]], a page describing food source policies." 
"In the case of a [[NewsMediaOrganization]], an ethicsPolicy is typically a statement" "describing the personal, organizational, and corporate standards of behavior expected" "by the organization.", ) leiCode: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An organization identifier that uniquely identifies a legal entity as defined in ISO" "17442.", ) vatID: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The Value-added Tax ID of the organization or person.", ) knowsLanguage: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Of a [[Person]], and less typically of an [[Organization]], to indicate a known language." "We do not distinguish skill levels or reading/writing/speaking/signing here. Use" "language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47).", ) correctionsPolicy: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="For an [[Organization]] (e.g. 
[[NewsMediaOrganization]]), a statement describing" "(in news media, the newsroom’s) disclosure and correction policy for errors.", ) logo: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An associated logo.", ) hasCredential: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A credential awarded to the Person or Organization.", ) address: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Physical address of the item.", ) brand: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The brand(s) associated with a product or service, or the brand(s) maintained by an organization" "or business person.", ) nonprofitStatus: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="nonprofitStatus indicates the legal status of a non-profit organization in its primary" "place of business.", ) contactPoint: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A contact point for a person or organization.", ) hasOfferCatalog: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates an OfferCatalog listing for this Organization, Person, or Service.", ) members: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A member of this organization.", ) aggregateRating: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The overall rating, based on a collection of reviews or ratings, of the item.", ) faxNumber: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The fax number.", ) telephone: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The telephone number.", ) taxID: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The Tax / Fiscal ID of the organization or person, e.g. 
the TIN in the US or the CIF/NIF in" "Spain.", ) naics: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The North American Industry Classification System (NAICS) code for a particular organization" "or business person.", ) location: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The location of, for example, where an event is happening, where an organization is located," "or where an action takes place.", ) numberOfEmployees: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The number of employees in an organization, e.g. business.", ) sponsor: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A person or organization that supports a thing through a pledge, promise, or financial" "contribution. E.g. a sponsor of a Medical Study or a corporate sponsor of an event.", ) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. 
See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. 
This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) geoCovers: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents a relationship between two geometries (or the places they represent), relating" 'a covering geometry to a covered geometry. "Every point of b is a point of (the interior' 'or boundary of) a". As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).', ) longitude: Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat ] = Field( default=None, description="The longitude of a location. For example ```-122.08585``` ([WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System)).", ) smokingAllowed: Optional[ Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any] ] = Field( default=None, description="Indicates whether it is allowed to smoke in the place, e.g. 
in the restaurant, hotel or" "hotel room.", ) isicV4: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The International Standard of Industrial Classification of All Economic Activities" "(ISIC), Revision 4 code for a particular organization, business person, or place.", ) globalLocationNumber: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred" "to as International Location Number or ILN) of the respective organization, person," "or place. The GLN is a 13-digit number used to identify parties and physical locations.", ) amenityFeature: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic" "property does not make a statement about whether the feature is included in an offer for" "the main accommodation or available at extra costs.", ) additionalProperty: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A property-value pair representing an additional characteristic of the entity, e.g." "a product feature or another characteristic for which there is no matching property" "in schema.org.Note: Publishers should be aware that applications designed to use specific" "schema.org properties (e.g. https://schema.org/width, https://schema.org/color," "https://schema.org/gtin13, ...) 
will typically expect such data to be provided using" "those properties, rather than using the generic property/value mechanism.", ) slogan: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A slogan or motto associated with the item.", ) photos: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Photographs of this place.", ) keywords: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Keywords or tags used to describe some item. Multiple textual entries in a keywords list" "are typically delimited by commas, or by repeating the property.", ) reviews: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Review of the item.", ) tourBookingPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="A page providing information on how to book a tour of some [[Place]], such as an [[Accommodation]]" "or [[ApartmentComplex]] in a real estate setting, as well as other kinds of tours as appropriate.", ) geoWithin: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents a relationship between two geometries (or the places they represent), relating" "a geometry to one that contains it, i.e. it is inside (i.e. within) its interior. 
As defined" "in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).", ) containsPlace: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The basic containment relation between a place and another that it contains.", ) review: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A review of the item.", ) hasMap: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="A URL to a map of the place.", ) containedIn: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The basic containment relation between a place and one that contains it.", ) events: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Upcoming or past events associated with this place or organization.", ) geoOverlaps: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents a relationship between two geometries (or the places they represent), relating" "a geometry to another that geospatially overlaps it, i.e. they have some but not all points" "in common. As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).", ) geoEquals: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents spatial relations in which two geometries (or the places they represent)" "are topologically equal, as defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM)." 
'"Two geometries are topologically equal if their interiors intersect and no part of' 'the interior or boundary of one geometry intersects the exterior of the other" (a symmetric' "relationship).", ) maps: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="A URL to a map of the place.", ) isAccessibleForFree: Optional[ Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any] ] = Field( default=None, description="A flag to signal that the item, event, or place is accessible for free.", ) event: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Upcoming or past event associated with this place, organization, or action.", ) photo: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A photograph of this place.", ) containedInPlace: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The basic containment relation between a place and one that contains it.", ) logo: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An associated logo.", ) geoCrosses: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents a relationship between two geometries (or the places they represent), relating" 'a geometry to another that crosses it: "a crosses b: they have some but not all interior' "points in common, and the dimension of the intersection is less than that of at least one" 'of them". 
As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).', ) address: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Physical address of the item.", ) geo: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The geo coordinates of the place.", ) openingHoursSpecification: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The opening hours of a certain place.", ) geoDisjoint: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents spatial relations in which two geometries (or the places they represent)" 'are topologically disjoint: "they have no point in common. They form a set of disconnected' 'geometries." (A symmetric relationship, as defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).)', ) geoIntersects: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents spatial relations in which two geometries (or the places they represent)" "have at least one point in common. As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).", ) latitude: Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat ] = Field( default=None, description="The latitude of a location. 
For example ```37.42242``` ([WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System)).", ) maximumAttendeeCapacity: Optional[ Union[List[Union[str, int, Any]], str, int, Any] ] = Field( default=None, description="The total number of individuals that may attend an event or venue.", ) aggregateRating: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The overall rating, based on a collection of reviews or ratings, of the item.", ) map: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="A URL to a map of the place.", ) branchCode: Union[List[Union[str, Any]], str, Any] = Field( default=None, description='A short textual code (also called "store code") that uniquely identifies a place of' "business. The code is typically assigned by the parentOrganization and used in structured" "URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047" 'the code "3047" is a branchCode for a particular branch.', ) faxNumber: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The fax number.", ) publicAccess: Optional[ Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any] ] = Field( default=None, description="A flag to signal that the [[Place]] is open to public visitors. If this property is omitted" "there is no assumed default boolean value", ) geoTouches: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents spatial relations in which two geometries (or the places they represent)" 'touch: "they have at least one boundary point in common, but no interior points." (A' "symmetric relationship, as defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).)", ) geoCoveredBy: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents a relationship between two geometries (or the places they represent), relating" "a geometry to another that covers it. 
As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).", ) telephone: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The telephone number.", ) hasDriveThroughService: Optional[ Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any] ] = Field( default=None, description="Indicates whether some facility (e.g. [[FoodEstablishment]], [[CovidTestingFacility]])" "offers a service that can be used by driving through in a car. In the case of [[CovidTestingFacility]]" "such facilities could potentially help with social distancing from other potentially-infected" "users.", ) specialOpeningHoursSpecification: Optional[ Union[List[Union[str, Any]], str, Any] ] = Field( default=None, description="The special opening hours of a certain place.Use this to explicitly override general" "opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].", ) geoContains: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Represents a relationship between two geometries (or the places they represent), relating" 'a containing geometry to a contained geometry. "a contains b iff no points of b lie in' 'the exterior of a, and at least one point of the interior of b lies in the interior of a".' "As defined in [DE-9IM](https://en.wikipedia.org/wiki/DE-9IM).", ) priceRange: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The price range of the business, for example ```$$$```.", ) currenciesAccepted: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217)," 'e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies)' 'for cryptocurrencies, e.g. "BTC"; well known names for [Local Exchange Trading Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system)' '(LETS) and other currency types, e.g. 
"Ithaca HOUR".', ) branchOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The larger organization that this local business is a branch of, if any. Not to be confused" "with (anatomical) [[branch]].", ) paymentAccepted: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.", ) openingHours: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The general opening hours for a business. Opening hours can be specified as a weekly time" "range, starting with days, then times per day. Multiple days can be listed with commas" "',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are" "specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```," "```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 format." "For example, 3pm is specified as ```15:00```, 10am as ```10:00```. * Here is an example:" '<code>&lt;time itemprop="openingHours" datetime=&quot;Tu,Th 16:00-20:00&quot;&gt;Tuesdays' "and Thursdays 4-8pm&lt;/time&gt;</code>.* If a business is open 7 days a week, then" "it can be specified as <code>&lt;time itemprop=&quot;openingHours&quot; datetime=&quot;Mo-Su&quot;&gt;Monday" "through Sunday, all day&lt;/time&gt;</code>.", )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/BeautySalon.py
0.865579
0.367582
BeautySalon.py
pypi
from __future__ import annotations

from datetime import *
from time import *
from typing import *

from pydantic import *


class Grant(BaseModel):
    """A grant, typically financial or otherwise quantifiable, of resources.

    Typically a [[funder]] sponsors some [[MonetaryAmount]] to an [[Organization]] or
    [[Person]], sometimes not necessarily via a dedicated or long-lived [[Project]],
    resulting in one or more outputs, or [[fundedItem]]s. For financial sponsorship,
    indicate the [[funder]] of a [[MonetaryGrant]]. For non-financial support, indicate
    [[sponsor]] of [[Grant]]s of resources (e.g. office space).

    Grants support activities directed towards some agreed collective goals, often but
    not always organized as [[Project]]s. Long-lived projects are sometimes sponsored by
    a variety of grants over time, but it is also common for a project to be associated
    with a single grant.

    The amount of a [[Grant]] is represented using [[amount]] as a [[MonetaryAmount]].

    References:
        https://schema.org/Grant
    Note:
        Model Depth 3
    """

    # Schema.org JSON-LD "@type" discriminator; constant for this model.
    type_: str = Field(default="Grant", alias="@type", const=True)
    potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        # NOTE: adjacent string literals below are joined by the lexer; each
        # non-final fragment must end with a space to avoid word-gluing.
        description="Indicates a potential Action, which describes an idealized action "
        "in which this thing would play an 'object' role.",
    )
    mainEntityOfPage: Optional[
        Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
    ] = Field(
        default=None,
        description="Indicates a page (or other CreativeWork) for which this thing is "
        "the main entity being described. See [background notes]"
        "(/docs/datamodel.html#mainEntityBackground) for details.",
    )
    subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="A CreativeWork or Event about this Thing.",
    )
    url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="URL of the item.",
    )
    alternateName: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="An alias for the item.",
    )
    sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="URL of a reference Web page that unambiguously indicates the "
        "item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, "
        "or official website.",
    )
    description: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="A description of the item.",
    )
    disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="A sub property of description. A short description of the item "
        "used to disambiguate from other, similar items. Information from other "
        "properties (in particular, name) may be necessary for the description to be "
        "useful for disambiguation.",
    )
    identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
        default=None,
        description="The identifier property represents any kind of identifier for "
        "any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org "
        "provides dedicated properties for representing many of these, either as "
        "textual strings or as URL (URI) links. See [background notes]"
        "(/docs/datamodel.html#identifierBg) for more details.",
    )
    image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="An image of the item. This can be a [[URL]] or a fully described "
        "[[ImageObject]].",
    )
    name: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="The name of the item.",
    )
    additionalType: Optional[
        Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
    ] = Field(
        default=None,
        description="An additional type for the item, typically used for adding more "
        "specific types from external vocabularies in microdata syntax. This is a "
        "relationship between something and a class that the thing is in. In RDFa "
        "syntax, it is better to use the native RDFa syntax - the 'typeof' attribute "
        "- for multiple types. Schema.org tools may have only weaker understanding "
        "of extra types, in particular those defined externally.",
    )
    fundedItem: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="Indicates something directly or indirectly funded or sponsored "
        "through a [[Grant]]. See also [[ownershipFundingInfo]].",
    )
    funder: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="A person or organization that supports (sponsors) something "
        "through some kind of financial contribution.",
    )
    sponsor: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="A person or organization that supports a thing through a pledge, "
        "promise, or financial contribution. E.g. a sponsor of a Medical Study or a "
        "corporate sponsor of an event.",
    )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/Grant.py
0.936278
0.373704
Grant.py
pypi
from __future__ import annotations

from datetime import *
from time import *
from typing import *

from pydantic import *


class EUEnergyEfficiencyCategoryA3Plus(BaseModel):
    """Represents EU Energy Efficiency Class A+++ as defined in EU energy labeling
    regulations.

    References:
        https://schema.org/EUEnergyEfficiencyCategoryA3Plus
    Note:
        Model Depth 6
    """

    # Schema.org JSON-LD "@type" discriminator; constant for this model.
    type_: str = Field(
        default="EUEnergyEfficiencyCategoryA3Plus", alias="@type", const=True
    )
    potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        # NOTE: adjacent string literals below are joined by the lexer; each
        # non-final fragment must end with a space to avoid word-gluing.
        description="Indicates a potential Action, which describes an idealized action "
        "in which this thing would play an 'object' role.",
    )
    mainEntityOfPage: Optional[
        Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
    ] = Field(
        default=None,
        description="Indicates a page (or other CreativeWork) for which this thing is "
        "the main entity being described. See [background notes]"
        "(/docs/datamodel.html#mainEntityBackground) for details.",
    )
    subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="A CreativeWork or Event about this Thing.",
    )
    url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="URL of the item.",
    )
    alternateName: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="An alias for the item.",
    )
    sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="URL of a reference Web page that unambiguously indicates the "
        "item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, "
        "or official website.",
    )
    description: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="A description of the item.",
    )
    disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="A sub property of description. A short description of the item "
        "used to disambiguate from other, similar items. Information from other "
        "properties (in particular, name) may be necessary for the description to be "
        "useful for disambiguation.",
    )
    identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
        default=None,
        description="The identifier property represents any kind of identifier for "
        "any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org "
        "provides dedicated properties for representing many of these, either as "
        "textual strings or as URL (URI) links. See [background notes]"
        "(/docs/datamodel.html#identifierBg) for more details.",
    )
    image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="An image of the item. This can be a [[URL]] or a fully described "
        "[[ImageObject]].",
    )
    name: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="The name of the item.",
    )
    additionalType: Optional[
        Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
    ] = Field(
        default=None,
        description="An additional type for the item, typically used for adding more "
        "specific types from external vocabularies in microdata syntax. This is a "
        "relationship between something and a class that the thing is in. In RDFa "
        "syntax, it is better to use the native RDFa syntax - the 'typeof' attribute "
        "- for multiple types. Schema.org tools may have only weaker understanding "
        "of extra types, in particular those defined externally.",
    )
    supersededBy: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="Relates a term (i.e. a property, class or enumeration) to one "
        "that supersedes it.",
    )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/EUEnergyEfficiencyCategoryA3Plus.py
0.943835
0.321247
EUEnergyEfficiencyCategoryA3Plus.py
pypi
from __future__ import annotations from datetime import * from time import * from typing import * from pydantic import * class VideoGameClip(BaseModel): """A short segment/part of a video game. References: https://schema.org/VideoGameClip Note: Model Depth 4 Attributes: potentialAction: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role. mainEntityOfPage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details. subjectOf: (Optional[Union[List[Union[str, Any]], str, Any]]): A CreativeWork or Event about this Thing. url: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of the item. alternateName: (Union[List[Union[str, Any]], str, Any]): An alias for the item. sameAs: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website. description: (Union[List[Union[str, Any]], str, Any]): A description of the item. disambiguatingDescription: (Union[List[Union[str, Any]], str, Any]): A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation. identifier: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. 
See [background notes](/docs/datamodel.html#identifierBg) for more details. image: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An image of the item. This can be a [[URL]] or a fully described [[ImageObject]]. name: (Union[List[Union[str, Any]], str, Any]): The name of the item. additionalType: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally. workTranslation: (Optional[Union[List[Union[str, Any]], str, Any]]): A work that is a translation of the content of this work. E.g. 西遊記 has an English workTranslation “Journey to the West”, a German workTranslation “Monkeys Pilgerfahrt” and a Vietnamese translation Tây du ký bình khảo. educationalLevel: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): The level in terms of progression through an educational or training context. Examples of educational levels include 'beginner', 'intermediate' or 'advanced', and formal sets of level indicators. associatedMedia: (Optional[Union[List[Union[str, Any]], str, Any]]): A media object that encodes this CreativeWork. This property is a synonym for encoding. exampleOfWork: (Optional[Union[List[Union[str, Any]], str, Any]]): A creative work that this work is an example/instance/realization/derivation of. releasedEvent: (Optional[Union[List[Union[str, Any]], str, Any]]): The place and time the release was issued, expressed as a PublicationEvent. version: (Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]): The version of the CreativeWork embodied by a specified resource. 
locationCreated: (Optional[Union[List[Union[str, Any]], str, Any]]): The location where the CreativeWork was created, which may not be the same as the location depicted in the CreativeWork. acquireLicensePage: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page documenting how licenses can be purchased or otherwise acquired, for the current item. thumbnailUrl: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A thumbnail image relevant to the Thing. provider: (Optional[Union[List[Union[str, Any]], str, Any]]): The service provider, service operator, or service performer; the goods producer. Another party (a seller) may offer those services or goods on behalf of the provider. A provider may also serve as the seller. expires: (Optional[Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]]): Date the content expires and is no longer useful or available. For example a [[VideoObject]] or [[NewsArticle]] whose availability or relevance is time-limited, or a [[ClaimReview]] fact check whose publisher wants to indicate that it may no longer be relevant (or helpful to highlight) after some date. contentLocation: (Optional[Union[List[Union[str, Any]], str, Any]]): The location depicted or described in the content. For example, the location in a photograph or painting. educationalUse: (Union[List[Union[str, Any]], str, Any]): The purpose of a work in the context of education; for example, 'assignment', 'group work'. copyrightHolder: (Optional[Union[List[Union[str, Any]], str, Any]]): The party holding the legal copyright to the CreativeWork. accessibilityControl: (Union[List[Union[str, Any]], str, Any]): Identifies input methods that are sufficient to fully control the described resource. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityControl-vocabulary). 
maintainer: (Optional[Union[List[Union[str, Any]], str, Any]]): A maintainer of a [[Dataset]], software package ([[SoftwareApplication]]), or other [[Project]]. A maintainer is a [[Person]] or [[Organization]] that manages contributions to, and/or publication of, some (typically complex) artifact. It is common for distributions of software and data to be based on "upstream" sources. When [[maintainer]] is applied to a specific version of something e.g. a particular version or packaging of a [[Dataset]], it is always possible that the upstream source has a different maintainer. The [[isBasedOn]] property can be used to indicate such relationships between datasets to make the different maintenance roles clear. Similarly in the case of software, a package may have dedicated maintainers working on integration into software distributions such as Ubuntu, as well as upstream maintainers of the underlying work. educationalAlignment: (Optional[Union[List[Union[str, Any]], str, Any]]): An alignment to an established educational framework.This property should not be used where the nature of the alignment can be described using a simple property, for example to express that a resource [[teaches]] or [[assesses]] a competency. spatial: (Optional[Union[List[Union[str, Any]], str, Any]]): The "spatial" property can be used in cases when more specific properties(e.g. [[locationCreated]], [[spatialCoverage]], [[contentLocation]]) are not known to be appropriate. publisher: (Optional[Union[List[Union[str, Any]], str, Any]]): The publisher of the creative work. keywords: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Keywords or tags used to describe some item. Multiple textual entries in a keywords list are typically delimited by commas, or by repeating the property. assesses: (Union[List[Union[str, Any]], str, Any]): The item being described is intended to assess the competency or learning outcome defined by the referenced term. 
reviews: (Optional[Union[List[Union[str, Any]], str, Any]]): Review of the item. isBasedOn: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A resource from which this work is derived or from which it is a modification or adaption. mentions: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates that the CreativeWork contains a reference to, but is not necessarily about a concept. publishingPrinciples: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): The publishingPrinciples property indicates (typically via [[URL]]) a document describing the editorial principles of an [[Organization]] (or individual, e.g. a [[Person]] writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles are those of the party primarily responsible for the creation of the [[CreativeWork]].While such policies are most typically expressed in natural language, sometimes related information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology. contributor: (Optional[Union[List[Union[str, Any]], str, Any]]): A secondary contributor to the CreativeWork or Event. license: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A license document that applies to this content, typically indicated by URL. citation: (Union[List[Union[str, Any]], str, Any]): A citation or reference to another creative work, such as another publication, web page, scholarly article, etc. accessibilitySummary: (Union[List[Union[str, Any]], str, Any]): A human-readable summary of specific accessibility features or deficiencies, consistent with the other accessibility metadata but expressing subtleties such as "short descriptions are present but long descriptions will be needed for non-visual users" or "short descriptions are present and no long descriptions are needed." 
award: (Union[List[Union[str, Any]], str, Any]): An award won by or for this item. commentCount: (Optional[Union[List[Union[str, int, Any]], str, int, Any]]): The number of comments this CreativeWork (e.g. Article, Question or Answer) has received. This is most applicable to works published in Web sites with commenting system; additional comments may exist elsewhere. temporalCoverage: (Union[List[Union[datetime, str, Any, AnyUrl]], datetime, str, Any, AnyUrl]): The temporalCoverage of a CreativeWork indicates the period that the content applies to, i.e. that it describes, either as a DateTime or as a textual string indicating a time period in [ISO 8601 time interval format](https://en.wikipedia.org/wiki/ISO_8601#Time_intervals). In the case of a Dataset it will typically indicate the relevant time period in a precise notation (e.g. for a 2011 census dataset, the year 2011 would be written "2011/2012"). Other forms of content, e.g. ScholarlyArticle, Book, TVSeries or TVEpisode, may indicate their temporalCoverage in broader terms - textually or via well-known URL. Written works such as books may sometimes have precise temporal coverage too, e.g. a work set in 1939 - 1945 can be indicated in ISO 8601 interval format format via "1939/1945".Open-ended date ranges can be written with ".." in place of the end date. For example, "2015-11/.." indicates a range beginning in November 2015 and with no specified final date. This is tentative and might be updated in future when ISO 8601 is officially updated. dateCreated: (Optional[Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]]): The date on which the CreativeWork was created or the item was added to a DataFeed. discussionUrl: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A link to the page containing the comments of the CreativeWork. 
copyrightNotice: (Union[List[Union[str, Any]], str, Any]): Text of a notice appropriate for describing the copyright aspects of this Creative Work, ideally indicating the owner of the copyright for the Work. learningResourceType: (Union[List[Union[str, Any]], str, Any]): The predominant type or kind characterizing the learning resource. For example, 'presentation', 'handout'. awards: (Union[List[Union[str, Any]], str, Any]): Awards won by or for this item. accessModeSufficient: (Optional[Union[List[Union[str, Any]], str, Any]]): A list of single or combined accessModes that are sufficient to understand all the intellectual content of a resource. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessModeSufficient-vocabulary). review: (Optional[Union[List[Union[str, Any]], str, Any]]): A review of the item. conditionsOfAccess: (Union[List[Union[str, Any]], str, Any]): Conditions that affect the availability of, or method(s) of access to, an item. Typically used for real world items such as an [[ArchiveComponent]] held by an [[ArchiveOrganization]]. This property is not suitable for use as a general Web access control mechanism. It is expressed only in natural language.For example "Available by appointment from the Reading Room" or "Accessible only from logged-in accounts ". interactivityType: (Union[List[Union[str, Any]], str, Any]): The predominant mode of learning supported by the learning resource. Acceptable values are 'active', 'expositive', or 'mixed'. abstract: (Union[List[Union[str, Any]], str, Any]): An abstract is a short description that summarizes a [[CreativeWork]]. fileFormat: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Media type, typically MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml)) of the content, e.g. application/zip of a SoftwareApplication binary. 
In cases where a CreativeWork has several media type representations, 'encoding' can be used to indicate each MediaObject alongside particular fileFormat information. Unregistered or niche file formats can be indicated instead via the most appropriate URL, e.g. defining Web page or a Wikipedia entry. interpretedAsClaim: (Optional[Union[List[Union[str, Any]], str, Any]]): Used to indicate a specific claim contained, implied, translated or refined from the content of a [[MediaObject]] or other [[CreativeWork]]. The interpreting party can be indicated using [[claimInterpreter]]. text: (Union[List[Union[str, Any]], str, Any]): The textual content of this CreativeWork. archivedAt: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates a page or other link involved in archival of a [[CreativeWork]]. In the case of [[MediaReview]], the items in a [[MediaReviewItem]] may often become inaccessible, but be archived by archival, journalistic, activist, or law enforcement organizations. In such cases, the referenced page may not directly publish the content. alternativeHeadline: (Union[List[Union[str, Any]], str, Any]): A secondary title of the CreativeWork. creditText: (Union[List[Union[str, Any]], str, Any]): Text that can be used to credit person(s) and/or organization(s) associated with a published Creative Work. funding: (Optional[Union[List[Union[str, Any]], str, Any]]): A [[Grant]] that directly or indirectly provide funding or sponsorship for this item. See also [[ownershipFundingInfo]]. interactionStatistic: (Optional[Union[List[Union[str, Any]], str, Any]]): The number of interactions for the CreativeWork using the WebSite or SoftwareApplication. The most specific child type of InteractionCounter should be used. workExample: (Optional[Union[List[Union[str, Any]], str, Any]]): Example/instance/realization/derivation of the concept of this creative work. E.g. the paperback edition, first edition, or e-book. 
about: (Optional[Union[List[Union[str, Any]], str, Any]]): The subject matter of the content. encodings: (Optional[Union[List[Union[str, Any]], str, Any]]): A media object that encodes this CreativeWork. funder: (Optional[Union[List[Union[str, Any]], str, Any]]): A person or organization that supports (sponsors) something through some kind of financial contribution. video: (Optional[Union[List[Union[str, Any]], str, Any]]): An embedded video object. isPartOf: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): Indicates an item or CreativeWork that this item, or CreativeWork (in some sense), is part of. pattern: (Union[List[Union[str, Any]], str, Any]): A pattern that something has, for example 'polka dot', 'striped', 'Canadian flag'. Values are typically expressed as text, although links to controlled value schemes are also supported. editor: (Optional[Union[List[Union[str, Any]], str, Any]]): Specifies the Person who edited the CreativeWork. dateModified: (Optional[Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]]): The date on which the CreativeWork was most recently modified or when the item's entry was modified within a DataFeed. translationOfWork: (Optional[Union[List[Union[str, Any]], str, Any]]): The work that this work has been translated from. E.g. 物种起源 is a translationOf “On the Origin of Species”. creativeWorkStatus: (Union[List[Union[str, Any]], str, Any]): The status of a creative work in terms of its stage in a lifecycle. Example terms include Incomplete, Draft, Published, Obsolete. Some organizations define a set of terms for the stages of their publication lifecycle. isBasedOnUrl: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A resource that was used in the creation of this resource. This term can be repeated for multiple sources. For example, http://example.com/great-multiplication-intro.html. 
isFamilyFriendly: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): Indicates whether this content is family friendly. isAccessibleForFree: (Optional[Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any]]): A flag to signal that the item, event, or place is accessible for free. author: (Optional[Union[List[Union[str, Any]], str, Any]]): The author of this content or rating. Please note that author is special in that HTML 5 provides a special mechanism for indicating authorship via the rel tag. That is equivalent to this and may be used interchangeably. contentReferenceTime: (Optional[Union[List[Union[datetime, str, Any]], datetime, str, Any]]): The specific time described by a creative work, for works (e.g. articles, video objects etc.) that emphasise a particular moment within an Event. correction: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Indicates a correction to a [[CreativeWork]], either via a [[CorrectionComment]], textually or in another document. sdDatePublished: (Optional[Union[List[Union[str, Any, date]], str, Any, date]]): Indicates the date on which the current structured data was generated / published. Typically used alongside [[sdPublisher]] comment: (Optional[Union[List[Union[str, Any]], str, Any]]): Comments, typically from users. countryOfOrigin: (Optional[Union[List[Union[str, Any]], str, Any]]): The country of origin of something, including products as well as creative works such as movie and TV content.In the case of TV and movie, this would be the country of the principle offices of the production company or individual responsible for the movie. For other kinds of [[CreativeWork]] it is difficult to provide fully general guidance, and properties such as [[contentLocation]] and [[locationCreated]] may be more applicable.In the case of products, the country of origin of the product. The exact interpretation of this may vary by context and product type, and cannot be fully enumerated here. 
timeRequired: (Optional[Union[List[Union[str, Any]], str, Any]]): Approximate or typical time it takes to work with or through this learning resource for the typical intended target audience, e.g. 'PT30M', 'PT1H25M'. typicalAgeRange: (Union[List[Union[str, Any]], str, Any]): The typical expected age range, e.g. '7-9', '11-'. genre: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Genre of the creative work, broadcast channel or group. producer: (Optional[Union[List[Union[str, Any]], str, Any]]): The person or organization who produced the work (e.g. music album, movie, TV/radio series etc.). schemaVersion: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Indicates (by URL or string) a particular version of a schema used in some CreativeWork. This property was created primarily to indicate the use of a specific schema.org release, e.g. ```10.0``` as a simple string, or more explicitly via URL, ```https://schema.org/docs/releases.html#v10.0```. There may be situations in which other schemas might usefully be referenced this way, e.g. ```http://dublincore.org/specifications/dublin-core/dces/1999-07-02/``` but this has not been carefully explored in the community. audience: (Optional[Union[List[Union[str, Any]], str, Any]]): An intended audience, i.e. a group for whom something was created. encoding: (Optional[Union[List[Union[str, Any]], str, Any]]): A media object that encodes this CreativeWork. This property is a synonym for associatedMedia. publisherImprint: (Optional[Union[List[Union[str, Any]], str, Any]]): The publishing division which published the comic. accessibilityAPI: (Union[List[Union[str, Any]], str, Any]): Indicates that the resource is compatible with the referenced accessibility API. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityAPI-vocabulary). 
sdPublisher: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates the party responsible for generating and publishing the current structured data markup, typically in cases where the structured data is derived automatically from existing published content but published on a different site. For example, student projects and open data initiatives often re-publish existing content with more explicitly structured metadata. The[[sdPublisher]] property helps make such practices more explicit. audio: (Optional[Union[List[Union[str, Any]], str, Any]]): An embedded audio object. accessibilityFeature: (Union[List[Union[str, Any]], str, Any]): Content features of the resource, such as accessible media, alternatives and supported enhancements for accessibility. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityFeature-vocabulary). spatialCoverage: (Optional[Union[List[Union[str, Any]], str, Any]]): The spatialCoverage of a CreativeWork indicates the place(s) which are the focus of the content. It is a subproperty of contentLocation intended primarily for more technical and detailed materials. For example with a Dataset, it indicates areas that the dataset describes: a dataset of New York weather would have spatialCoverage which was the place: the state of New York. accessMode: (Union[List[Union[str, Any]], str, Any]): The human sensory perceptual system or cognitive faculty through which a person may process or perceive information. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessMode-vocabulary). 
editEIDR: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): An [EIDR](https://eidr.org/) (Entertainment Identifier Registry) [[identifier]] representing a specific edit / edition for a work of film or television.For example, the motion picture known as "Ghostbusters" whose [[titleEIDR]] is "10.5240/7EC7-228A-510A-053E-CBB8-J" has several edits, e.g. "10.5240/1F2A-E1C5-680A-14C6-E76B-I" and "10.5240/8A35-3BEE-6497-5D12-9E4F-3".Since schema.org types like [[Movie]] and [[TVEpisode]] can be used for both works and their multiple expressions, it is possible to use [[titleEIDR]] alone (for a general description), or alongside [[editEIDR]] for a more edit-specific description. usageInfo: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): The schema.org [[usageInfo]] property indicates further information about a [[CreativeWork]]. This property is applicable both to works that are freely available and to those that require payment or other transactions. It can reference additional information, e.g. community expectations on preferred linking and citation conventions, as well as purchasing details. For something that can be commercially licensed, usageInfo can provide detailed, resource-specific information about licensing options.This property can be used alongside the license property which indicates license(s) applicable to some piece of content. The usageInfo property can provide information about other licensing options, e.g. acquiring commercial usage rights for an image that is also available under non-commercial creative commons licenses. position: (Union[List[Union[str, int, Any]], str, int, Any]): The position of an item in a series or sequence of items. 
encodingFormat: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): Media type typically expressed using a MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml) and [MDN reference](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types)), e.g. application/zip for a SoftwareApplication binary, audio/mpeg for .mp3 etc.In cases where a [[CreativeWork]] has several media type representations, [[encoding]] can be used to indicate each [[MediaObject]] alongside particular [[encodingFormat]] information.Unregistered or niche encoding and file formats can be indicated instead via the most appropriate URL, e.g. defining Web page or a Wikipedia/Wikidata entry. copyrightYear: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): The year during which the claimed copyright for the CreativeWork was first asserted. mainEntity: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates the primary entity described in some page or other CreativeWork. creator: (Optional[Union[List[Union[str, Any]], str, Any]]): The creator/author of this CreativeWork. This is the same as the Author property for CreativeWork. teaches: (Union[List[Union[str, Any]], str, Any]): The item being described is intended to help a person learn the competency or learning outcome defined by the referenced term. temporal: (Union[List[Union[datetime, str, Any]], datetime, str, Any]): The "temporal" property can be used in cases where more specific properties(e.g. [[temporalCoverage]], [[dateCreated]], [[dateModified]], [[datePublished]]) are not known to be appropriate. 
size: (Union[List[Union[str, Any]], str, Any]): A standardized size of a product or creative work, specified either through a simple textual string (for example 'XL', '32Wx34L'), a QuantitativeValue with a unitCode, or a comprehensive and structured [[SizeSpecification]]; in other cases, the [[width]], [[height]], [[depth]] and [[weight]] properties may be more applicable. translator: (Optional[Union[List[Union[str, Any]], str, Any]]): Organization or person who adapts a creative work to different languages, regional differences and technical requirements of a target market, or that translates during some event. aggregateRating: (Optional[Union[List[Union[str, Any]], str, Any]]): The overall rating, based on a collection of reviews or ratings, of the item. accountablePerson: (Optional[Union[List[Union[str, Any]], str, Any]]): Specifies the Person that is legally accountable for the CreativeWork. accessibilityHazard: (Union[List[Union[str, Any]], str, Any]): A characteristic of the described resource that is physiologically dangerous to some users. Related to WCAG 2.0 guideline 2.3. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityHazard-vocabulary). contentRating: (Union[List[Union[str, Any]], str, Any]): Official rating of a piece of content&#x2014;for example, 'MPAA PG-13'. recordedAt: (Optional[Union[List[Union[str, Any]], str, Any]]): The Event where the CreativeWork was recorded. The CreativeWork may capture all or part of the event. publication: (Optional[Union[List[Union[str, Any]], str, Any]]): A publication event associated with the item. sdLicense: (Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]]): A license document that applies to this structured data, typically indicated by URL. headline: (Union[List[Union[str, Any]], str, Any]): Headline of the article. 
materialExtent: (Union[List[Union[str, Any]], str, Any]): The quantity of the materials being described or an expression of the physical space they occupy. inLanguage: (Union[List[Union[str, Any]], str, Any]): The language of the content or performance or used in an action. Please use one of the language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47). See also [[availableLanguage]]. material: (Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]): A material that something is made from, e.g. leather, wool, cotton, paper. datePublished: (Optional[Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date]]): Date of first broadcast/publication. offers: (Optional[Union[List[Union[str, Any]], str, Any]]): An offer to provide this item&#x2014;for example, an offer to sell a product, rent the DVD of a movie, perform a service, or give away tickets to an event. Use [[businessFunction]] to indicate the kind of transaction offered, i.e. sell, lease, etc. This property can also be used to describe a [[Demand]]. While this property is listed as expected on a number of common types, it can be used in others. In that case, using a second type, such as Product or a subtype of Product, can clarify the nature of the offer. hasPart: (Optional[Union[List[Union[str, Any]], str, Any]]): Indicates an item or CreativeWork that is part of this item, or CreativeWork (in some sense). sourceOrganization: (Optional[Union[List[Union[str, Any]], str, Any]]): The Organization on whose behalf the creator was working. sponsor: (Optional[Union[List[Union[str, Any]], str, Any]]): A person or organization that supports a thing through a pledge, promise, or financial contribution. E.g. a sponsor of a Medical Study or a corporate sponsor of an event. character: (Optional[Union[List[Union[str, Any]], str, Any]]): Fictional person connected with a creative work. actors: (Optional[Union[List[Union[str, Any]], str, Any]]): An actor, e.g. 
in TV, radio, movie, video games etc. Actors can be associated with individual items or with a series, episode, clip. actor: (Optional[Union[List[Union[str, Any]], str, Any]]): An actor, e.g. in TV, radio, movie, video games etc., or in an event. Actors can be associated with individual items or with a series, episode, clip. clipNumber: (Union[List[Union[str, int, Any]], str, int, Any]): Position of the clip within an ordered group of clips. partOfEpisode: (Optional[Union[List[Union[str, Any]], str, Any]]): The episode to which this clip belongs. partOfSeason: (Optional[Union[List[Union[str, Any]], str, Any]]): The season to which this episode belongs. startOffset: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): The start time of the clip expressed as the number of seconds from the beginning of the work. partOfSeries: (Optional[Union[List[Union[str, Any]], str, Any]]): The series to which this episode or season belongs. endOffset: (Optional[Union[List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat]]): The end time of the clip expressed as the number of seconds from the beginning of the work. director: (Optional[Union[List[Union[str, Any]], str, Any]]): A director of e.g. TV, radio, movie, video gaming etc. content, or of an event. Directors can be associated with individual items or with a series, episode, clip. directors: (Optional[Union[List[Union[str, Any]], str, Any]]): A director of e.g. TV, radio, movie, video games etc. content. Directors can be associated with individual items or with a series, episode, clip. musicBy: (Optional[Union[List[Union[str, Any]], str, Any]]): The composer of the soundtrack. 
""" type_: str = Field(default="VideoGameClip", alias="@type", const=True) potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates a potential Action, which describes an idealized action in which this thing" "would play an 'object' role.", ) mainEntityOfPage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page (or other CreativeWork) for which this thing is the main entity being" "described. See [background notes](/docs/datamodel.html#mainEntityBackground)" "for details.", ) subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A CreativeWork or Event about this Thing.", ) url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of the item.", ) alternateName: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An alias for the item.", ) sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the" "URL of the item's Wikipedia page, Wikidata entry, or official website.", ) description: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A description of the item.", ) disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A sub property of description. A short description of the item used to disambiguate from" "other, similar items. Information from other properties (in particular, name) may" "be necessary for the description to be useful for disambiguation.", ) identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The identifier property represents any kind of identifier for any kind of [[Thing]]," "such as ISBNs, GTIN codes, UUIDs etc. 
Schema.org provides dedicated properties for" "representing many of these, either as textual strings or as URL (URI) links. See [background" "notes](/docs/datamodel.html#identifierBg) for more details.", ) image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].", ) name: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The name of the item.", ) additionalType: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="An additional type for the item, typically used for adding more specific types from external" "vocabularies in microdata syntax. This is a relationship between something and a class" "that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof'" "attribute - for multiple types. Schema.org tools may have only weaker understanding" "of extra types, in particular those defined externally.", ) workTranslation: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A work that is a translation of the content of this work. E.g. 西遊記 has an English workTranslation" "“Journey to the West”, a German workTranslation “Monkeys Pilgerfahrt” and a Vietnamese" "translation Tây du ký bình khảo.", ) educationalLevel: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="The level in terms of progression through an educational or training context. Examples" "of educational levels include 'beginner', 'intermediate' or 'advanced', and formal" "sets of level indicators.", ) associatedMedia: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A media object that encodes this CreativeWork. 
This property is a synonym for encoding.", ) exampleOfWork: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A creative work that this work is an example/instance/realization/derivation of.", ) releasedEvent: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The place and time the release was issued, expressed as a PublicationEvent.", ) version: Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat ] = Field( default=None, description="The version of the CreativeWork embodied by a specified resource.", ) locationCreated: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The location where the CreativeWork was created, which may not be the same as the location" "depicted in the CreativeWork.", ) acquireLicensePage: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page documenting how licenses can be purchased or otherwise acquired, for" "the current item.", ) thumbnailUrl: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="A thumbnail image relevant to the Thing.", ) provider: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The service provider, service operator, or service performer; the goods producer." "Another party (a seller) may offer those services or goods on behalf of the provider." "A provider may also serve as the seller.", ) expires: Optional[ Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date] ] = Field( default=None, description="Date the content expires and is no longer useful or available. 
For example a [[VideoObject]]" "or [[NewsArticle]] whose availability or relevance is time-limited, or a [[ClaimReview]]" "fact check whose publisher wants to indicate that it may no longer be relevant (or helpful" "to highlight) after some date.", ) contentLocation: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The location depicted or described in the content. For example, the location in a photograph" "or painting.", ) educationalUse: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The purpose of a work in the context of education; for example, 'assignment', 'group" "work'.", ) copyrightHolder: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The party holding the legal copyright to the CreativeWork.", ) accessibilityControl: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Identifies input methods that are sufficient to fully control the described resource." "Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityControl-vocabulary).", ) maintainer: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A maintainer of a [[Dataset]], software package ([[SoftwareApplication]]), or other" "[[Project]]. A maintainer is a [[Person]] or [[Organization]] that manages contributions" "to, and/or publication of, some (typically complex) artifact. It is common for distributions" 'of software and data to be based on "upstream" sources. When [[maintainer]] is applied' "to a specific version of something e.g. a particular version or packaging of a [[Dataset]]," "it is always possible that the upstream source has a different maintainer. The [[isBasedOn]]" "property can be used to indicate such relationships between datasets to make the different" "maintenance roles clear. 
Similarly in the case of software, a package may have dedicated" "maintainers working on integration into software distributions such as Ubuntu, as" "well as upstream maintainers of the underlying work.", ) educationalAlignment: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An alignment to an established educational framework.This property should not be used" "where the nature of the alignment can be described using a simple property, for example" "to express that a resource [[teaches]] or [[assesses]] a competency.", ) spatial: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description='The "spatial" property can be used in cases when more specific properties(e.g. [[locationCreated]],' "[[spatialCoverage]], [[contentLocation]]) are not known to be appropriate.", ) publisher: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The publisher of the creative work.", ) keywords: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Keywords or tags used to describe some item. 
Multiple textual entries in a keywords list" "are typically delimited by commas, or by repeating the property.", ) assesses: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The item being described is intended to assess the competency or learning outcome defined" "by the referenced term.", ) reviews: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Review of the item.", ) isBasedOn: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="A resource from which this work is derived or from which it is a modification or adaption.", ) mentions: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates that the CreativeWork contains a reference to, but is not necessarily about" "a concept.", ) publishingPrinciples: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="The publishingPrinciples property indicates (typically via [[URL]]) a document describing" "the editorial principles of an [[Organization]] (or individual, e.g. a [[Person]]" "writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity" "policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles" "are those of the party primarily responsible for the creation of the [[CreativeWork]].While" "such policies are most typically expressed in natural language, sometimes related" "information (e.g. 
indicating a [[funder]]) can be expressed using schema.org terminology.", ) contributor: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A secondary contributor to the CreativeWork or Event.", ) license: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="A license document that applies to this content, typically indicated by URL.", ) citation: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A citation or reference to another creative work, such as another publication, web page," "scholarly article, etc.", ) accessibilitySummary: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A human-readable summary of specific accessibility features or deficiencies, consistent" 'with the other accessibility metadata but expressing subtleties such as "short descriptions' 'are present but long descriptions will be needed for non-visual users" or "short descriptions' 'are present and no long descriptions are needed."', ) award: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An award won by or for this item.", ) commentCount: Optional[Union[List[Union[str, int, Any]], str, int, Any]] = Field( default=None, description="The number of comments this CreativeWork (e.g. Article, Question or Answer) has received." "This is most applicable to works published in Web sites with commenting system; additional" "comments may exist elsewhere.", ) temporalCoverage: Union[ List[Union[datetime, str, Any, AnyUrl]], datetime, str, Any, AnyUrl ] = Field( default=None, description="The temporalCoverage of a CreativeWork indicates the period that the content applies" "to, i.e. that it describes, either as a DateTime or as a textual string indicating a time" "period in [ISO 8601 time interval format](https://en.wikipedia.org/wiki/ISO_8601#Time_intervals)." 
"In the case of a Dataset it will typically indicate the relevant time period in a precise" 'notation (e.g. for a 2011 census dataset, the year 2011 would be written "2011/2012").' "Other forms of content, e.g. ScholarlyArticle, Book, TVSeries or TVEpisode, may indicate" "their temporalCoverage in broader terms - textually or via well-known URL. Written" "works such as books may sometimes have precise temporal coverage too, e.g. a work set" 'in 1939 - 1945 can be indicated in ISO 8601 interval format format via "1939/1945".Open-ended' 'date ranges can be written with ".." in place of the end date. For example, "2015-11/.."' "indicates a range beginning in November 2015 and with no specified final date. This is" "tentative and might be updated in future when ISO 8601 is officially updated.", ) dateCreated: Optional[ Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date] ] = Field( default=None, description="The date on which the CreativeWork was created or the item was added to a DataFeed.", ) discussionUrl: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="A link to the page containing the comments of the CreativeWork.", ) copyrightNotice: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Text of a notice appropriate for describing the copyright aspects of this Creative Work," "ideally indicating the owner of the copyright for the Work.", ) learningResourceType: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The predominant type or kind characterizing the learning resource. 
For example, 'presentation'," "'handout'.", ) awards: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Awards won by or for this item.", ) accessModeSufficient: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A list of single or combined accessModes that are sufficient to understand all the intellectual" "content of a resource. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessModeSufficient-vocabulary).", ) review: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A review of the item.", ) conditionsOfAccess: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Conditions that affect the availability of, or method(s) of access to, an item. Typically" "used for real world items such as an [[ArchiveComponent]] held by an [[ArchiveOrganization]]." "This property is not suitable for use as a general Web access control mechanism. It is" 'expressed only in natural language.For example "Available by appointment from the' 'Reading Room" or "Accessible only from logged-in accounts ".', ) interactivityType: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The predominant mode of learning supported by the learning resource. Acceptable values" "are 'active', 'expositive', or 'mixed'.", ) abstract: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="An abstract is a short description that summarizes a [[CreativeWork]].", ) fileFormat: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Media type, typically MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml))" "of the content, e.g. application/zip of a SoftwareApplication binary. 
In cases where" "a CreativeWork has several media type representations, 'encoding' can be used to indicate" "each MediaObject alongside particular fileFormat information. Unregistered or niche" "file formats can be indicated instead via the most appropriate URL, e.g. defining Web" "page or a Wikipedia entry.", ) interpretedAsClaim: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Used to indicate a specific claim contained, implied, translated or refined from the" "content of a [[MediaObject]] or other [[CreativeWork]]. The interpreting party can" "be indicated using [[claimInterpreter]].", ) text: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The textual content of this CreativeWork.", ) archivedAt: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="Indicates a page or other link involved in archival of a [[CreativeWork]]. In the case" "of [[MediaReview]], the items in a [[MediaReviewItem]] may often become inaccessible," "but be archived by archival, journalistic, activist, or law enforcement organizations." "In such cases, the referenced page may not directly publish the content.", ) alternativeHeadline: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A secondary title of the CreativeWork.", ) creditText: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Text that can be used to credit person(s) and/or organization(s) associated with a published" "Creative Work.", ) funding: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A [[Grant]] that directly or indirectly provide funding or sponsorship for this item." "See also [[ownershipFundingInfo]].", ) interactionStatistic: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The number of interactions for the CreativeWork using the WebSite or SoftwareApplication." 
"The most specific child type of InteractionCounter should be used.", ) workExample: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Example/instance/realization/derivation of the concept of this creative work. E.g." "the paperback edition, first edition, or e-book.", ) about: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The subject matter of the content.", ) encodings: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A media object that encodes this CreativeWork.", ) funder: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A person or organization that supports (sponsors) something through some kind of financial" "contribution.", ) video: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An embedded video object.", ) isPartOf: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="Indicates an item or CreativeWork that this item, or CreativeWork (in some sense), is" "part of.", ) pattern: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A pattern that something has, for example 'polka dot', 'striped', 'Canadian flag'." "Values are typically expressed as text, although links to controlled value schemes" "are also supported.", ) editor: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Specifies the Person who edited the CreativeWork.", ) dateModified: Optional[ Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date] ] = Field( default=None, description="The date on which the CreativeWork was most recently modified or when the item's entry" "was modified within a DataFeed.", ) translationOfWork: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The work that this work has been translated from. E.g. 
物种起源 is a translationOf “On the" "Origin of Species”.", ) creativeWorkStatus: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The status of a creative work in terms of its stage in a lifecycle. Example terms include" "Incomplete, Draft, Published, Obsolete. Some organizations define a set of terms for" "the stages of their publication lifecycle.", ) isBasedOnUrl: Optional[ Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] ] = Field( default=None, description="A resource that was used in the creation of this resource. This term can be repeated for" "multiple sources. For example, http://example.com/great-multiplication-intro.html.", ) isFamilyFriendly: Optional[ Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any] ] = Field( default=None, description="Indicates whether this content is family friendly.", ) isAccessibleForFree: Optional[ Union[List[Union[str, StrictBool, Any]], str, StrictBool, Any] ] = Field( default=None, description="A flag to signal that the item, event, or place is accessible for free.", ) author: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The author of this content or rating. Please note that author is special in that HTML 5" "provides a special mechanism for indicating authorship via the rel tag. That is equivalent" "to this and may be used interchangeably.", ) contentReferenceTime: Optional[ Union[List[Union[datetime, str, Any]], datetime, str, Any] ] = Field( default=None, description="The specific time described by a creative work, for works (e.g. articles, video objects" "etc.) 
that emphasise a particular moment within an Event.", ) correction: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Indicates a correction to a [[CreativeWork]], either via a [[CorrectionComment]]," "textually or in another document.", ) sdDatePublished: Optional[ Union[List[Union[str, Any, date]], str, Any, date] ] = Field( default=None, description="Indicates the date on which the current structured data was generated / published. Typically" "used alongside [[sdPublisher]]", ) comment: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Comments, typically from users.", ) countryOfOrigin: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The country of origin of something, including products as well as creative works such" "as movie and TV content.In the case of TV and movie, this would be the country of the principle" "offices of the production company or individual responsible for the movie. For other" "kinds of [[CreativeWork]] it is difficult to provide fully general guidance, and properties" "such as [[contentLocation]] and [[locationCreated]] may be more applicable.In the" "case of products, the country of origin of the product. The exact interpretation of this" "may vary by context and product type, and cannot be fully enumerated here.", ) timeRequired: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Approximate or typical time it takes to work with or through this learning resource for" "the typical intended target audience, e.g. 'PT30M', 'PT1H25M'.", ) typicalAgeRange: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The typical expected age range, e.g. 
'7-9', '11-'.", ) genre: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Genre of the creative work, broadcast channel or group.", ) producer: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The person or organization who produced the work (e.g. music album, movie, TV/radio" "series etc.).", ) schemaVersion: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Indicates (by URL or string) a particular version of a schema used in some CreativeWork." "This property was created primarily to indicate the use of a specific schema.org release," "e.g. ```10.0``` as a simple string, or more explicitly via URL, ```https://schema.org/docs/releases.html#v10.0```." "There may be situations in which other schemas might usefully be referenced this way," "e.g. ```http://dublincore.org/specifications/dublin-core/dces/1999-07-02/```" "but this has not been carefully explored in the community.", ) audience: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An intended audience, i.e. a group for whom something was created.", ) encoding: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A media object that encodes this CreativeWork. This property is a synonym for associatedMedia.", ) publisherImprint: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The publishing division which published the comic.", ) accessibilityAPI: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Indicates that the resource is compatible with the referenced accessibility API. 
Values" "should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityAPI-vocabulary).", ) sdPublisher: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates the party responsible for generating and publishing the current structured" "data markup, typically in cases where the structured data is derived automatically" "from existing published content but published on a different site. For example, student" "projects and open data initiatives often re-publish existing content with more explicitly" "structured metadata. The[[sdPublisher]] property helps make such practices more" "explicit.", ) audio: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An embedded audio object.", ) accessibilityFeature: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Content features of the resource, such as accessible media, alternatives and supported" "enhancements for accessibility. Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityFeature-vocabulary).", ) spatialCoverage: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The spatialCoverage of a CreativeWork indicates the place(s) which are the focus of" "the content. It is a subproperty of contentLocation intended primarily for more technical" "and detailed materials. For example with a Dataset, it indicates areas that the dataset" "describes: a dataset of New York weather would have spatialCoverage which was the place:" "the state of New York.", ) accessMode: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The human sensory perceptual system or cognitive faculty through which a person may" "process or perceive information. 
Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessMode-vocabulary).", ) editEIDR: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="An [EIDR](https://eidr.org/) (Entertainment Identifier Registry) [[identifier]]" "representing a specific edit / edition for a work of film or television.For example," 'the motion picture known as "Ghostbusters" whose [[titleEIDR]] is "10.5240/7EC7-228A-510A-053E-CBB8-J"' 'has several edits, e.g. "10.5240/1F2A-E1C5-680A-14C6-E76B-I" and "10.5240/8A35-3BEE-6497-5D12-9E4F-3".Since' "schema.org types like [[Movie]] and [[TVEpisode]] can be used for both works and their" "multiple expressions, it is possible to use [[titleEIDR]] alone (for a general description)," "or alongside [[editEIDR]] for a more edit-specific description.", ) usageInfo: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="The schema.org [[usageInfo]] property indicates further information about a [[CreativeWork]]." "This property is applicable both to works that are freely available and to those that" "require payment or other transactions. It can reference additional information, e.g." "community expectations on preferred linking and citation conventions, as well as purchasing" "details. For something that can be commercially licensed, usageInfo can provide detailed," "resource-specific information about licensing options.This property can be used" "alongside the license property which indicates license(s) applicable to some piece" "of content. The usageInfo property can provide information about other licensing options," "e.g. 
acquiring commercial usage rights for an image that is also available under non-commercial" "creative commons licenses.", ) position: Union[List[Union[str, int, Any]], str, int, Any] = Field( default=None, description="The position of an item in a series or sequence of items.", ) encodingFormat: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="Media type typically expressed using a MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml)" "and [MDN reference](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types))," "e.g. application/zip for a SoftwareApplication binary, audio/mpeg for .mp3 etc.In" "cases where a [[CreativeWork]] has several media type representations, [[encoding]]" "can be used to indicate each [[MediaObject]] alongside particular [[encodingFormat]]" "information.Unregistered or niche encoding and file formats can be indicated instead" "via the most appropriate URL, e.g. defining Web page or a Wikipedia/Wikidata entry.", ) copyrightYear: Optional[ Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat, ] ] = Field( default=None, description="The year during which the claimed copyright for the CreativeWork was first asserted.", ) mainEntity: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates the primary entity described in some page or other CreativeWork.", ) creator: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The creator/author of this CreativeWork. 
This is the same as the Author property for" "CreativeWork.", ) teaches: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The item being described is intended to help a person learn the competency or learning" "outcome defined by the referenced term.", ) temporal: Union[List[Union[datetime, str, Any]], datetime, str, Any] = Field( default=None, description='The "temporal" property can be used in cases where more specific properties(e.g.' "[[temporalCoverage]], [[dateCreated]], [[dateModified]], [[datePublished]])" "are not known to be appropriate.", ) size: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A standardized size of a product or creative work, specified either through a simple" "textual string (for example 'XL', '32Wx34L'), a QuantitativeValue with a unitCode," "or a comprehensive and structured [[SizeSpecification]]; in other cases, the [[width]]," "[[height]], [[depth]] and [[weight]] properties may be more applicable.", ) translator: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Organization or person who adapts a creative work to different languages, regional" "differences and technical requirements of a target market, or that translates during" "some event.", ) aggregateRating: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The overall rating, based on a collection of reviews or ratings, of the item.", ) accountablePerson: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Specifies the Person that is legally accountable for the CreativeWork.", ) accessibilityHazard: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="A characteristic of the described resource that is physiologically dangerous to some" "users. Related to WCAG 2.0 guideline 2.3. 
Values should be drawn from the [approved vocabulary](https://www.w3.org/2021/a11y-discov-vocab/latest/#accessibilityHazard-vocabulary).", ) contentRating: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Official rating of a piece of content&#x2014;for example, 'MPAA PG-13'.", ) recordedAt: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The Event where the CreativeWork was recorded. The CreativeWork may capture all or part" "of the event.", ) publication: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A publication event associated with the item.", ) sdLicense: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field( default=None, description="A license document that applies to this structured data, typically indicated by URL.", ) headline: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="Headline of the article.", ) materialExtent: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The quantity of the materials being described or an expression of the physical space" "they occupy.", ) inLanguage: Union[List[Union[str, Any]], str, Any] = Field( default=None, description="The language of the content or performance or used in an action. Please use one of the language" "codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47). See also" "[[availableLanguage]].", ) material: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field( default=None, description="A material that something is made from, e.g. 
leather, wool, cotton, paper.", ) datePublished: Optional[ Union[List[Union[datetime, str, Any, date]], datetime, str, Any, date] ] = Field( default=None, description="Date of first broadcast/publication.", ) offers: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An offer to provide this item&#x2014;for example, an offer to sell a product, rent the" "DVD of a movie, perform a service, or give away tickets to an event. Use [[businessFunction]]" "to indicate the kind of transaction offered, i.e. sell, lease, etc. This property can" "also be used to describe a [[Demand]]. While this property is listed as expected on a number" "of common types, it can be used in others. In that case, using a second type, such as Product" "or a subtype of Product, can clarify the nature of the offer.", ) hasPart: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Indicates an item or CreativeWork that is part of this item, or CreativeWork (in some" "sense).", ) sourceOrganization: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The Organization on whose behalf the creator was working.", ) sponsor: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A person or organization that supports a thing through a pledge, promise, or financial" "contribution. E.g. a sponsor of a Medical Study or a corporate sponsor of an event.", ) character: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="Fictional person connected with a creative work.", ) actors: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An actor, e.g. in TV, radio, movie, video games etc. Actors can be associated with individual" "items or with a series, episode, clip.", ) actor: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="An actor, e.g. 
in TV, radio, movie, video games etc., or in an event. Actors can be associated" "with individual items or with a series, episode, clip.", ) clipNumber: Union[List[Union[str, int, Any]], str, int, Any] = Field( default=None, description="Position of the clip within an ordered group of clips.", ) partOfEpisode: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The episode to which this clip belongs.", ) partOfSeason: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The season to which this episode belongs.", ) startOffset: Optional[ Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat, ] ] = Field( default=None, description="The start time of the clip expressed as the number of seconds from the beginning of the" "work.", ) partOfSeries: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The series to which this episode or season belongs.", ) endOffset: Optional[ Union[ List[Union[str, Any, StrictInt, StrictFloat]], str, Any, StrictInt, StrictFloat, ] ] = Field( default=None, description="The end time of the clip expressed as the number of seconds from the beginning of the work.", ) director: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A director of e.g. TV, radio, movie, video gaming etc. content, or of an event. Directors" "can be associated with individual items or with a series, episode, clip.", ) directors: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="A director of e.g. TV, radio, movie, video games etc. content. Directors can be associated" "with individual items or with a series, episode, clip.", ) musicBy: Optional[Union[List[Union[str, Any]], str, Any]] = Field( default=None, description="The composer of the soundtrack.", )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/VideoGameClip.py
0.925592
0.343232
VideoGameClip.py
pypi
from __future__ import annotations
from datetime import *
from time import *
from typing import *
from pydantic import *


class EUEnergyEfficiencyCategoryE(BaseModel):
    """Represents EU Energy Efficiency Class E as defined in EU energy labeling regulations.

    References:
        https://schema.org/EUEnergyEfficiencyCategoryE
    Note:
        Model Depth 6

    Fix note: the generated ``description`` strings were built from adjacent
    string literals with no separating space (e.g. "this thing" "would" ->
    "this thingwould"); each description is now a single, correctly spaced
    string. Field names, types, and defaults are unchanged.
    """

    # Serialized as the JSON-LD "@type" discriminator; constant by design.
    type_: str = Field(default="EUEnergyEfficiencyCategoryE", alias="@type", const=True)
    potentialAction: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.",
    )
    mainEntityOfPage: Optional[
        Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
    ] = Field(
        default=None,
        description="Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.",
    )
    subjectOf: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="A CreativeWork or Event about this Thing.",
    )
    url: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="URL of the item.",
    )
    alternateName: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="An alias for the item.",
    )
    sameAs: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.",
    )
    description: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="A description of the item.",
    )
    disambiguatingDescription: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.",
    )
    identifier: Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any] = Field(
        default=None,
        description="The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.",
    )
    image: Optional[Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]] = Field(
        default=None,
        description="An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].",
    )
    name: Union[List[Union[str, Any]], str, Any] = Field(
        default=None,
        description="The name of the item.",
    )
    additionalType: Optional[
        Union[List[Union[str, AnyUrl, Any]], str, AnyUrl, Any]
    ] = Field(
        default=None,
        description="An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.",
    )
    supersededBy: Optional[Union[List[Union[str, Any]], str, Any]] = Field(
        default=None,
        description="Relates a term (i.e. a property, class or enumeration) to one that supersedes it.",
    )
/schemaorg_types-0.4.0.tar.gz/schemaorg_types-0.4.0/schemaorg_types/EUEnergyEfficiencyCategoryE.py
0.943458
0.313236
EUEnergyEfficiencyCategoryE.py
pypi
import os
import logging
import re
from lxml.etree import ElementTree as XMLElementTree, Element as XMLElement, SubElement as XMLSubelement, Comment as XMLComment, QName, indent
import json

from schemata.structures import *

logger = logging.getLogger("schemata.exporters")


class JSONSchemasExporter(object):
    """Exports a Schemata schema to the JSON Schema format.

    Note: this does not implement every JSON Schema feature, as it was
    developed for a specific use-case.

    Attributes
    ----------
    _js : string
        The JSON Schema specification URI used as the value of the
        $schema property. Do not change.
    """

    def __init__(self):
        self._js = "https://json-schema.org/draft/2020-12/schema"

    def exportSchema(self, schema, versionNumber, schemaURI, filePath=""):
        """Builds the root JSON object for the given schema and optionally saves it.

        Parameters
        ----------
        schema : Schema
            The schema to export.
        versionNumber : string
            The version number of this schema; any convention may be used.
        schemaURI : string
            A URI for this schema (becomes the $id property).
        filePath : string
            Path to save the output to; an empty string means do not save.

        Returns
        -------
        dict
            The JSON Schema object.
        """
        logger.debug("Exporting schema for {} as JSON Schema.".format(schema.formatName))

        o1 = {}
        o1["$schema"] = self._js
        o1["$id"] = schemaURI
        o1["title"] = "{} ({})".format(schema.formatName, versionNumber)
        o1["type"] = "object"

        # Only a single root object structure is supported at present.
        rootObject = schema.getRootObjectStructures()[0]

        self._exportObject(schema, rootObject, o1)

        if filePath != "":
            with open(filePath, "w") as fo:
                json.dump(o1, fo, indent=4)

        return o1

    def _exportArray(self, schema, _array, jsonObject):
        """Exports an array structure onto *jsonObject* (sets "type" and "items")."""
        logger.debug("Exporting array structure {}.".format(_array.reference))

        jsonObject["type"] = "array"

        if _array.itemTypeReference == "string":
            jsonObject["items"] = {"type": "string"}
        elif _array.itemTypeReference == "integer":
            jsonObject["items"] = {"type": "integer"}
        elif _array.itemTypeReference == "decimal":
            # Fix: JSON Schema has no "decimal" type keyword; Schemata's
            # decimal maps to "number" (consistent with _exportProperty).
            jsonObject["items"] = {"type": "number"}
        elif _array.itemTypeReference == "boolean":
            jsonObject["items"] = {"type": "boolean"}
        elif isinstance(_array.itemType, DataStructure):
            ds = _array.itemType

            # Only string-based custom data structures are handled so far.
            if ds.baseStructureReference == "string":
                jsonObject["items"] = {}
                jsonObject["items"]["type"] = "string"

                if ds.allowedPattern != "":
                    jsonObject["items"]["pattern"] = ds.allowedPattern

                if ds.allowedValues != []:
                    jsonObject["items"]["enum"] = ds.allowedValues

    def _exportObject(self, schema, _object, jsonObject):
        """Exports an object structure onto *jsonObject* (properties/required)."""
        logger.debug("Exporting object structure {}.".format(_object.reference))

        jsonObject["type"] = "object"
        jsonObject["properties"] = {}
        jsonObject["required"] = []
        # Schemata objects are closed: no properties beyond those declared.
        jsonObject["additionalProperties"] = False

        for _property in _object.properties:
            p = _property.propertyStructure
            pn = p.propertyName

            jsonObject["properties"][pn] = {}
            jsonObject["properties"][pn]["description"] = p.metadata.description

            self._exportProperty(schema, p, jsonObject["properties"][pn])

            if not _property.isOptional:
                jsonObject["required"].append(pn)

    def _exportProperty(self, schema, _property, jsonObject):
        """Exports a property structure onto *jsonObject*, recursing into
        array and object value types."""
        logger.debug("Exporting property structure {}.".format(_property.reference))

        if _property.valueTypeReference == "string":
            jsonObject["type"] = "string"
        elif _property.valueTypeReference == "integer":
            jsonObject["type"] = "integer"
        elif _property.valueTypeReference == "decimal":
            jsonObject["type"] = "number"
        elif _property.valueTypeReference == "boolean":
            jsonObject["type"] = "boolean"
        elif isinstance(_property.valueType, DataStructure):
            ds = _property.valueType

            if ds.baseStructureReference == "string":
                jsonObject["type"] = "string"

                if ds.allowedPattern != "":
                    jsonObject["pattern"] = ds.allowedPattern

                if ds.allowedValues != []:
                    jsonObject["enum"] = ds.allowedValues
        elif isinstance(_property.valueType, ArrayStructure):
            self._exportArray(schema, _property.valueType, jsonObject)
        elif isinstance(_property.valueType, ObjectStructure):
            self._exportObject(schema, _property.valueType, jsonObject)


class XSDExporter(object):
    """Exports a Schemata schema to the XSD format.

    Reusable structures are exported as named XSD types, which closely
    matches how Schemata works.

    Attributes
    ----------
    _xs : string
        The value of xmlns:xs. Do not change.
    _typePrefix : string
        Prefix applied to generated XSD type names to distinguish them from
        other elements. '__type__' by default.
    """

    def __init__(self):
        self._xs = "http://www.w3.org/2001/XMLSchema"
        self._typePrefix = "__type__"

    def _getXSDTypeName(self, structure):
        # Each structure category gets its own sub-prefix so references
        # cannot collide across categories.
        if isinstance(structure, DataStructure):
            return self._typePrefix + "d__" + structure.reference

        if isinstance(structure, ElementStructure):
            return self._typePrefix + "e__" + structure.reference

        if isinstance(structure, AttributeStructure):
            return self._typePrefix + "a__" + structure.reference

        raise Exception("Cannot create XSD type name for {}.".format(structure.reference))

    def exportSchema(self, schema, versionNumber, filePath=""):
        """Exports a schema as XSD, optionally saving it to *filePath*.

        Parameters
        ----------
        schema : Schema
            The schema to export.
        versionNumber : string
            The version number of the schema.
        filePath : string
            Path to save the XSD to; an empty string means do not save.

        Returns
        -------
        ElementTree
            The XSD schema document.
        """
        xs = self._xs

        logger.debug("Exporting schema for {} as XSD.".format(schema.formatName))

        e1 = XMLElement(QName(xs, "schema"))
        e1.set("elementFormDefault", "qualified")

        # Put a comment at the top of the XSD file saying what XML format it's for.
        if schema.formatName != "":
            c1 = XMLComment(" An XSD file for {} ({}). ".format(schema.formatName, versionNumber))
            e1.append(c1)

        # Export the more basic data structures first, then the element
        # structures that build on them.
        self._exportDataStructures(schema, e1)
        self._exportElementStructures(schema, e1)

        logger.debug("Exporting root elements.")

        # Root element structures are exported last, so the output XSD file
        # should be 'read' in reverse.
        roots = schema.getRootElementStructures()

        for root in roots:
            logger.debug("Exporting element <{}>.".format(root.elementName))

            e2 = XMLElement(QName(xs, "element"))
            e2.set("name", root.elementName)
            e2.set("type", self._getXSDTypeName(root))
            e1.append(e2)

        tree = XMLElementTree(e1)
        # NOTE(review): the indent width below may have lost whitespace in
        # transit - confirm against the published package.
        indent(tree, space=" ")

        if filePath != "":
            tree.write(filePath, xml_declaration=True, encoding="utf-8", pretty_print=True)

        return tree

    def _exportDataStructures(self, schema, xsdElement):
        """Exports every used data structure in *schema* as an xs:simpleType
        appended to *xsdElement*."""
        xs = self._xs

        logger.debug("Exporting data structures.")

        dataStructures = schema.getDataStructures()

        for dataStructure in dataStructures:
            # If a data structure isn't actually used in the schema, don't export it.
            if not dataStructure.isUsed:
                continue

            logger.debug("Exporting data structure '{}'.".format(dataStructure.reference))

            e1 = XMLElement(QName(xs, "simpleType"))
            e1.set("name", self._getXSDTypeName(dataStructure))

            if dataStructure.baseStructureReference == "string":
                logger.debug(f"'{dataStructure.reference}' has an XSD base of string.")

                e2 = XMLElement(QName(xs, "restriction"))
                e2.set("base", "xs:string")

                # A pattern facet takes precedence over an enumeration.
                if dataStructure.allowedPattern != "":
                    logger.debug(f"Setting pattern value to '{dataStructure.allowedPattern}'")

                    e3 = XMLElement(QName(xs, "pattern"))
                    e3.set("value", dataStructure.allowedPattern)
                    e2.append(e3)
                elif dataStructure.allowedValues:
                    logger.debug(f"Setting enumeration values.")

                    for value in dataStructure.allowedValues:
                        e3 = XMLElement(QName(xs, "enumeration"))
                        e3.set("value", value)
                        e2.append(e3)

                e1.append(e2)
            elif dataStructure.baseStructureReference == "decimal":
                logger.debug(f"'{dataStructure.reference}' has an XSD base of decimal.")

                e2 = XMLElement(QName(xs, "restriction"))
                e2.set("base", "xs:decimal")
                e1.append(e2)
            elif dataStructure.baseStructureReference == "integer":
                logger.debug(f"'{dataStructure.reference}' has an XSD base of integer.")

                e2 = XMLElement(QName(xs, "restriction"))
                e2.set("base", "xs:integer")

                if dataStructure.minimumValue != None:
                    logger.debug(f"Setting minInclusive value.")

                    e3 = XMLElement(QName(xs, "minInclusive"))
                    e3.set("value", str(dataStructure.minimumValue))
                    e2.append(e3)

                if dataStructure.maximumValue != None:
                    logger.debug(f"Setting maxInclusive value.")

                    e3 = XMLElement(QName(xs, "maxInclusive"))
                    e3.set("value", str(dataStructure.maximumValue))
                    e2.append(e3)

                e1.append(e2)
            elif dataStructure.baseStructureReference == "boolean":
                logger.debug(f"'{dataStructure.reference}' has an XSD base of boolean.")

                e2 = XMLElement(QName(xs, "restriction"))
                e2.set("base", "xs:boolean")
                e1.append(e2)

            xsdElement.append(e1)

            logger.debug("Exported data structure '{}'.".format(dataStructure.reference))

        logger.debug("Exported data structures.")

    def _exportElementStructures(self, schema, xsdElement):
        """Exports every used element structure in *schema* as an XSD type
        appended to *xsdElement*.

        Whether an element becomes an xs:simpleType or xs:complexType depends
        on its content model and whether it carries attributes.
        """
        xs = self._xs

        logger.debug("Exporting element structures.")

        elementStructures = schema.getElementStructures()

        for elementStructure in elementStructures:
            # If the element structure isn't actually used in the schema (i.e., it
            # isn't a possible root element or subelement of another element),
            # don't export it.
            if not elementStructure.isUsed:
                continue

            logger.debug("Exporting element structure '{}' <{}>.".format(elementStructure.reference, elementStructure.elementName))

            if not elementStructure.hasContent:
                # Empty element: complexType with attributes only.
                e1 = XMLElement(QName(xs, "complexType"))
                e1.set("name", self._getXSDTypeName(elementStructure))

                self._exportAttributes(schema, elementStructure.attributes, e1)

                xsdElement.append(e1)
            elif elementStructure.contentIsElementsOnly:
                e1 = XMLElement(QName(xs, "complexType"))
                e1.set("name", self._getXSDTypeName(elementStructure))
                e1.set("mixed", "false")

                self._exportSubelements(schema, elementStructure.allowedContent, e1)
                self._exportAttributes(schema, elementStructure.attributes, e1)

                xsdElement.append(e1)
            elif elementStructure.contentIsElementsAndAnyText:
                e1 = XMLElement(QName(xs, "complexType"))
                e1.set("name", self._getXSDTypeName(elementStructure))
                e1.set("mixed", "true")

                self._exportSubelements(schema, elementStructure.allowedContent, e1)
                self._exportAttributes(schema, elementStructure.attributes, e1)

                xsdElement.append(e1)
            elif elementStructure.contentIsAnyText and elementStructure.hasAttributes:
                # Text content plus attributes requires simpleContent/extension.
                e1 = XMLElement(QName(xs, "complexType"))
                e1.set("name", self._getXSDTypeName(elementStructure))

                e2 = XMLElement(QName(xs, "simpleContent"))

                e3 = XMLElement(QName(xs, "extension"))
                e3.set("base", "xs:string")

                self._exportAttributes(schema, elementStructure.attributes, e3)

                e2.append(e3)
                e1.append(e2)
                xsdElement.append(e1)
            elif elementStructure.contentIsAnyText and not elementStructure.hasAttributes:
                e1 = XMLElement(QName(xs, "simpleType"))
                e1.set("name", self._getXSDTypeName(elementStructure))

                e2 = XMLElement(QName(xs, "restriction"))
                e2.set("base", "xs:string")

                e1.append(e2)
                xsdElement.append(e1)
            elif elementStructure.contentIsSingleValue and elementStructure.hasAttributes:
                e1 = XMLElement(QName(xs, "complexType"))
                e1.set("name", self._getXSDTypeName(elementStructure))

                e2 = XMLElement(QName(xs, "simpleContent"))

                e3 = XMLElement(QName(xs, "extension"))

                if elementStructure.valueTypeReference == "decimal":
                    e3.set("base", "xs:decimal")
                elif elementStructure.valueTypeReference == "integer":
                    e3.set("base", "xs:integer")
                elif elementStructure.valueTypeReference == "boolean":
                    e3.set("base", "xs:boolean")
                else:
                    e3.set("base", self._getXSDTypeName(elementStructure.valueType))

                self._exportAttributes(schema, elementStructure.attributes, e3)

                e2.append(e3)
                e1.append(e2)
                xsdElement.append(e1)
            elif elementStructure.contentIsSingleValue and not elementStructure.hasAttributes:
                e1 = XMLElement(QName(xs, "simpleType"))
                e1.set("name", self._getXSDTypeName(elementStructure))

                e2 = XMLElement(QName(xs, "restriction"))

                if elementStructure.valueTypeReference == "decimal":
                    e2.set("base", "xs:decimal")
                # Fix: this was a plain 'if', so a "decimal" value fell
                # through to the else branch and had its base overwritten.
                elif elementStructure.valueTypeReference == "integer":
                    e2.set("base", "xs:integer")
                elif elementStructure.valueTypeReference == "boolean":
                    e2.set("base", "xs:boolean")
                else:
                    e2.set("base", self._getXSDTypeName(elementStructure.valueType))

                e1.append(e2)
                xsdElement.append(e1)
            else:
                # Fix: logging.warn is a deprecated alias; use the module logger.
                logger.warning("Could not export element structure '{}' <{}>.".format(elementStructure.reference, elementStructure.elementName))

    def _exportSubelements(self, schema, elements, xsdElement):
        """Exports a structure list (the subelement content model of an
        element) to XSD, appending the indicator to *xsdElement*.

        Schemata's lists map onto XSD indicators as follows:
        OrderedStructureList -> sequence; StructureChoice -> choice;
        UnorderedStructureList -> a choice usable any number of times
        (not a perfect representation).
        """
        xs = self._xs

        # No content model means nothing to export.
        if elements is None:
            return

        xsdIndicatorType = "sequence"

        if isinstance(elements, OrderedStructureList):
            e1 = XMLElement(QName(xs, "sequence"))

        if isinstance(elements, UnorderedStructureList):
            xsdIndicatorType = "choice"
            e1 = XMLElement(QName(xs, "choice"))
            e1.set("minOccurs", "0")
            e1.set("maxOccurs", "unbounded")

        if isinstance(elements, StructureChoice):
            xsdIndicatorType = "choice"
            e1 = XMLElement(QName(xs, "choice"))

        for element in elements.structures:
            # Nested structure lists recurse; leaf element usages are emitted
            # as xs:element particles.
            if isinstance(element, (OrderedStructureList, UnorderedStructureList, StructureChoice)):
                self._exportSubelements(schema, element, e1)
            else:
                # Any-text markers have no XSD particle equivalent here.
                if isinstance(element, AnyTextUsageReference):
                    continue

                logger.debug(f"Exporting element of type {type(element)}.")
                logger.debug(f"Exporting {self._getXSDTypeName(element.elementStructure)}.")

                e3 = XMLElement(QName(xs, "element"))
                e3.set("name", element.elementStructure.elementName)
                e3.set("type", self._getXSDTypeName(element.elementStructure))

                p = element.minimumNumberOfOccurrences
                q = element.maximumNumberOfOccurrences

                # Occurrence constraints are only emitted on sequence particles;
                # -1 means 'unbounded'.
                if xsdIndicatorType == "sequence":
                    if p != 1:
                        e3.set("minOccurs", str(p))

                    if q != 1:
                        e3.set("maxOccurs", "unbounded" if q == -1 else str(q))

                e1.append(e3)

        xsdElement.append(e1)

    def _exportAttributes(self, schema, attributes, xsdElement):
        """Exports the given attribute usages as xs:attribute elements
        appended to *xsdElement*."""
        xs = self._xs

        baseTypes = ["string", "integer", "boolean"]

        for attribute in attributes:
            a = attribute.attributeStructure

            e1 = XMLElement(QName(xs, "attribute"))
            e1.set("name", a.attributeName)

            if a.dataStructureReference in baseTypes:
                # Built-in types map straight onto the xs: namespace.
                if a.dataStructureReference == "string":
                    e1.set("type", "xs:string")

                if a.dataStructureReference == "integer":
                    e1.set("type", "xs:integer")

                if a.dataStructureReference == "boolean":
                    e1.set("type", "xs:boolean")
            else:
                e1.set("type", self._getXSDTypeName(a.dataStructure))

            e1.set("use", "optional" if attribute.isOptional else "required")

            xsdElement.append(e1)


class SpecificationGenerator(object):
    """Generates Markdown specifications / human-readable documentation
    from Schemata schemas.

    The generation is not perfect, but it makes writing a specification a
    lot quicker.

    To do: only works for XML schemas at the moment; HTML output would also
    be nice.
    """

    def __init__(self):
        pass

    def generateSpecification(self, schema, filePath):
        """Writes a Markdown specification for *schema* to *filePath*."""
        with open(filePath, "w") as fileObject:
            rootElements = schema.getPossibleRootElementStructures()
            nonRootElements = schema.getNonRootElementStructures()
            elements = rootElements + nonRootElements

            fileObject.write("# {} Specification\n\n".format(schema.formatName))
            fileObject.write("This document gives the specification for {}.\n\n".format(schema.formatName))
            fileObject.write("## Table of Contents\n\n")

            # Anchor links: GitHub-style anchors use hyphens, so underscores
            # in element names are rewritten.
            for element in elements:
                fileObject.write("- [The &lt;{}&gt; element](#the-{}-element)\n".format(element.elementName, re.sub("_", "-", element.elementName)))

            for element in elements:
                fileObject.write("\n\n<br /><br />\n\n")
                fileObject.write("## The &lt;{}&gt; element\n\n".format(element.elementName))
                fileObject.write("{}\n\n".format(element.description.replace("<", "&lt;").replace(">", "&gt;")))
                fileObject.write("### Attributes\n\n")

                aa = []

                if element.attributes:
                    fileObject.write("| Name | Required | Allowed Values | Description |\n")
                    fileObject.write("|---|---|---|---|\n")

                    for attribute in element.attributes:
                        a = schema.getAttributeStructureByReference(attribute.attributeReference)
                        d = schema.getDataStructureByReference(a.dataStructure)
                        aa.append(a)

                        # Prefer the data structure's own description; fall back
                        # to a generated summary of its enum or pattern.
                        allowedValuesText = d.description

                        if d.allowedValues and d.description == "":
                            allowedValuesText = "one of: {}".format(", ".join(["`{}`".format(v) for v in d.allowedValues]))
                        elif d.allowedPattern and d.description == "" and d.baseStructure == "string":
                            allowedValuesText = f"a string with the pattern `{d.allowedPattern}`"

                        fileObject.write("| `{}` | {} | {} | {} |\n".format(a.attributeName, "Required" if not attribute.isOptional else "Optional", allowedValuesText, a.description))

                    fileObject.write("\n")
                else:
                    fileObject.write("None\n\n")

                fileObject.write("### Possible Subelements\n\n")

                ee = []

                if element.subelements:
                    for subelement in element.subelements.elements:
                        e = schema.getElementStructureByReference(subelement.elementReference)
                        ee.append(e)
                        fileObject.write("- &lt;{}&gt;\n".format(e.elementName))

                    fileObject.write("\n")
                else:
                    fileObject.write("None\n\n")

                fileObject.write("### Examples\n\n")
                fileObject.write("Below is shown an example of the `<{}>` element.\n\n".format(element.elementName))
                fileObject.write("```xml\n")

                attributeString = " ".join(["{}=\"{}\"".format(a.attributeName, "..." if a.exampleValue == "" else a.exampleValue) for a in aa])

                if not element.isSelfClosing:
                    if aa:
                        fileObject.write("<{} {}>\n".format(element.elementName, attributeString))
                    else:
                        fileObject.write("<{}>\n".format(element.elementName))

                    # NOTE(review): the single-space indent in the example
                    # bodies below may have lost width in transit - confirm
                    # against the published package.
                    if element.allowedContent == "text only":
                        fileObject.write(" {}\n".format(element.exampleValue))
                    else:
                        for e in ee:
                            if not e.isSelfClosing:
                                fileObject.write(" <{}></{}>\n".format(e.elementName, e.elementName))
                            else:
                                fileObject.write(" <{} />\n".format(e.elementName))

                    fileObject.write("</{}>\n".format(element.elementName))
                else:
                    if aa:
                        fileObject.write("<{} {} />\n".format(element.elementName, attributeString))
                    else:
                        fileObject.write("<{} />\n".format(element.elementName))

                fileObject.write("```\n\n")


class ExampleFileGenerator(object):
    """Generates example XML files that conform to a given schema.

    To do: only works for XML schemas; generating deliberately invalid files
    for schema testing would also be useful.
    """

    def __init__(self):
        pass

    def generateExampleFiles(self, schema, folderPath):
        """Writes one example XML file per root element into *folderPath*."""
        os.makedirs(folderPath, exist_ok=True)

        rootElements = schema.getRootElementStructures()

        # Fix: previously every root element was written to 'example1.xml',
        # so later roots silently overwrote earlier ones. Numbering now
        # starts at 1, so single-root schemas still produce 'example1.xml'.
        for rootElementIndex, rootElement in enumerate(rootElements, start=1):
            filePath = os.path.join(folderPath, "example{}.xml".format(rootElementIndex))

            e1 = XMLElement(rootElement.elementName)

            self._generateAttributes(rootElement, e1)
            self._generateSubelements(rootElement, e1)

            tree = XMLElementTree(e1)
            # NOTE(review): indent width may have lost whitespace in transit.
            indent(tree, space=" ")
            tree.write(filePath, xml_declaration=True, encoding="utf-8", pretty_print=True)

    def _generateAttributes(self, elementStructure, e1):
        """Sets example values for every attribute of *elementStructure* on *e1*."""
        for attributeUsageReference in elementStructure.attributes:
            s = attributeUsageReference.attributeStructure

            logger.debug(f"Generating attribute '{s.attributeName}'.")

            if s.dataStructure is not None:
                e1.set(s.attributeName, s.dataStructure.metadata.exampleValue)

    def _generateSubelements(self, elementStructure, e1):
        """Populates *e1* with example text and example subelements, recursing
        through ordered content models."""
        if elementStructure.contentIsSingleValue:
            # Prefer the value type's example; fall back to the element's own.
            if elementStructure.valueType is not None:
                e1.text = elementStructure.valueType.metadata.exampleValue
            elif elementStructure.metadata.exampleValue != "":
                e1.text = elementStructure.metadata.exampleValue

        if elementStructure.contentIsAnyText:
            e1.text = elementStructure.metadata.exampleValue

        if isinstance(elementStructure.allowedContent, OrderedStructureList):
            for structure in elementStructure.allowedContent.structures:
                if isinstance(structure, ElementUsageReference):
                    # Emit up to three repetitions of unbounded elements so the
                    # example shows repetition without becoming huge.
                    n = 1

                    if structure.maximumNumberOfOccurrences == -1:
                        n = 3
                    elif structure.maximumNumberOfOccurrences <= 3:
                        n = structure.maximumNumberOfOccurrences

                    for x in range(n):
                        e2 = XMLElement(structure.elementStructure.elementName)

                        self._generateAttributes(structure.elementStructure, e2)
                        self._generateSubelements(structure.elementStructure, e2)

                        e1.append(e2)


"""
Functions that allow you to do the export process in a single line. Best not to
use these if you're going to export a large number of schemas.
"""


def exportSchemaAsXSD(schema, versionNumber, filePath):
    """Exports the given schema as an XSD document.

    Parameters
    ----------
    schema : Schema
        The schema to export.
    versionNumber : string
        The version number of the schema.
    filePath : string
        The path to which to save the XSD file.
    """
    xsdExporter = XSDExporter()
    xsdExporter.exportSchema(schema, versionNumber, filePath)


def exportSchemaAsJSONSchema(schema, versionNumber, filePath):
    """Exports the given schema as a JSON Schema document.

    Parameters
    ----------
    schema : Schema
        The schema to export.
    versionNumber : string
        The version number of the schema.
    filePath : string
        The path to which to save the JSON Schema file.
    """
    jsonSchemasExporter = JSONSchemasExporter()
    jsonSchemasExporter.exportSchema(schema, versionNumber, filePath)


def generateSpecification(schema, filePath):
    """Generates a specification / human-readable documentation for the given
    schema. Some manual editing will be required, but this can save a lot of
    time.

    Parameters
    ----------
    schema : Schema
        The schema to create a specification for.
    filePath : string
        The file path to which to save the specification.
    """
    specificationGenerator = SpecificationGenerator()
    specificationGenerator.generateSpecification(schema, filePath)


def generateExampleXMLFiles(schema, folderPath):
    """Generates example XML files for the given schema. Some manual editing
    will be required, but this can save time at the start of defining a new
    format.

    Parameters
    ----------
    schema : Schema
        The schema to create example files for.
    folderPath : string
        The folder to which to save the example files.
    """
    exampleFileGenerator = ExampleFileGenerator()
    exampleFileGenerator.generateExampleFiles(schema, folderPath)
/schematacode-1.0.1.tar.gz/schematacode-1.0.1/schemata/exporters.py
0.720565
0.332256
exporters.py
pypi
import logging

logger = logging.getLogger("schemata.structures")

"""
This module contains the core classes for Schemata.

There are, broadly, three categories of class here.

The first is the Schema class, which simply represents the entire schema. It
contains all of the structures.

The second is structure classes. A Schemata document consists of a set of
different kinds of structures. A structure might define an XML element, or an
XML attribute, or a JSON object, and so on.

The third is usage references. A usage reference defines how one structure uses
another structure. For example, an element structure might want to refer to a
number of attribute structures that it uses. However, these attribute
structures might be optional in this context, or have different default values
to usual. This is what the usage reference defines.
"""


class Schema(object):
    """
    Represents a schema.

    Attributes
    ----------
    formatName : str
        the name of this XML or JSON format
    structures : list
        the structures in this format
    dependencies : list
        the schemas on which this schema depends, if any
    """

    def __init__(self):
        self.formatName = ""
        self.structures = []
        self.dependencies = []

    @property
    def _allStructures(self):
        # Dependency structures first; this schema's own structures come last,
        # so they win on reference collisions in getStructureByReference.
        return [structure for dependency in self.dependencies for structure in dependency.structures] + self.structures

    def getStructureByReference(self, reference):
        """
        Gets the structure with the given reference.

        Parameters
        ----------
        reference : str
            the reference of the structure to get

        Returns
        -------
        Either the structure with the given reference, or None if none is found.
        """
        d = {structure.reference: structure for structure in self._allStructures}

        return d.get(reference, None)

    def getDataStructures(self):
        """
        Gets all of the data structures in this schema.

        Returns
        -------
        A list of data structures.
        """
        return [structure for structure in self._allStructures if isinstance(structure, DataStructure)]

    def getAttributeStructures(self):
        """
        Gets all of the XML attribute structures in this schema.

        Returns
        -------
        A list of attribute structures.
        """
        return [structure for structure in self._allStructures if isinstance(structure, AttributeStructure)]

    def getElementStructures(self):
        """
        Gets all of the XML element structures in this schema.

        Returns
        -------
        A list of element structures.
        """
        return [structure for structure in self._allStructures if isinstance(structure, ElementStructure)]

    def getRootElementStructures(self):
        """
        Gets all of the XML root element structures in this schema.

        Returns
        -------
        A list of element structures.
        """
        return [structure for structure in self.getElementStructures() if structure.canBeRootElement]

    def getNonRootElementStructures(self):
        """
        Gets all of the XML non-root element structures in this schema.

        Returns
        -------
        A list of element structures.
        """
        return [structure for structure in self.getElementStructures() if not structure.canBeRootElement]

    def getObjectStructures(self):
        """
        Gets all of the JSON object structures in this schema.

        Returns
        -------
        A list of object structures.
        """
        return [structure for structure in self._allStructures if isinstance(structure, ObjectStructure)]

    def getRootObjectStructures(self):
        """
        Gets all of the JSON root object structures in this schema.

        Returns
        -------
        A list of object structures.
        """
        return [structure for structure in self.getObjectStructures() if structure.canBeRootObject]

    def setIsUsed(self):
        """
        Sets the isUsed property for every structure in the schema. This
        property determines whether or not a structure will appear in the
        exported schema format, so this function must be called by the exporter
        at some point.

        Returns
        -------
        None
        """
        # Usage propagates outwards from the root element structures.
        rootElementStructures = self.getRootElementStructures()

        for rootElementStructure in rootElementStructures:
            rootElementStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "formatName": self.formatName,
            "structures": [structure.toJSON() if hasattr(structure, "toJSON") else structure for structure in self.structures]
        }


class StructureMetadata(object):
    """
    Captures the metadata for a structure. The metadata is used in automatic
    documentation generation.

    Attributes
    ----------
    description : str
        a description of this structure
    exampleValue : str
        an example value for this structure, if relevant
    """

    def __init__(self):
        self.description = ""
        self.exampleValue = ""

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "description": self.description,
            "exampleValue": self.exampleValue
        }


class Structure(object):
    """
    A base class for all structures. Defines common properties.

    Attributes
    ----------
    schema : Schema
        the schema that this structure is in
    baseStructureReference : string
        the reference of the base structure of this structure
    baseStructure : Structure
        the base structure of this structure
    reference : string
        the reference of this structure
    isUsed : boolean
        whether or not this structure is used in the schema
    metadata : StructureMetadata
        the metadata for this structure
    """

    def __init__(self, reference=""):
        self.schema = None
        self.baseStructureReference = ""
        self.reference = reference
        self.isUsed = False
        self.metadata = StructureMetadata()

    @property
    def baseStructure(self):
        return self.schema.getStructureByReference(self.baseStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for this structure and the structures it
        depends on.

        Returns
        -------
        None
        """
        self.isUsed = True

        if self.baseStructure is not None:
            self.baseStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {}


class DataStructure(Structure):
    """
    A class for data structures.

    Attributes
    ----------
    allowedPattern : string
        a regular expression describing what patterns this data structure can
        have (if the base type is a string)
    allowedValues : list
        a list of the different values that this data structure can have
        (effectively defining an enumeration); can be a list of strings or
        numbers
    minimumValue : number
        the minimum value (inclusive) that this data structure can have (if the
        base type is an integer or a decimal)
    maximumValue : number
        the maximum value (inclusive) that this data structure can have (if the
        base type is an integer or a decimal)
    defaultValue : any
        the default value that this data structure takes
    """

    def __init__(self, reference=""):
        super().__init__(reference)

        self.allowedPattern = ""
        self.allowedValues = []
        self.minimumValue = None
        self.maximumValue = None
        self.defaultValue = None

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "DataStructure",
            "baseStructureReference": self.baseStructureReference,
            "reference": self.reference,
            "isUsed": self.isUsed,
            "metadata": self.metadata.toJSON(),
            "allowedPattern": self.allowedPattern,
            "allowedValues": self.allowedValues,
            "minimumValue": self.minimumValue,
            "maximumValue": self.maximumValue,
            "defaultValue": self.defaultValue
        }


class ListFunction(object):
    """
    A class that represents a kind of 'meta-data-structure'. If a list function
    is used in a Schemata file, it means that a data type should be used that
    is a list of another data type. Adding list-like data types manually is
    tedious, as it often involves writing a very complex regular expression -
    list functions do this automatically in Schemata.

    Attributes
    ----------
    schema : Schema
        the schema that this list function is in
    dataStructureReference : string
        the reference of the data structure that this list function should be a
        list of
    dataStructure : DataStructure
        the data structure that this list function should be a list of
    separator : string
        the character or set of characters that should act as separators in
        this list - usually a comma or a semi-colon
    """

    def __init__(self, dataStructureReference, separator):
        self.schema = None
        self.dataStructureReference = dataStructureReference
        self.separator = separator

    @property
    def dataStructure(self):
        return self.schema.getStructureByReference(self.dataStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for the structure this list function depends
        on.

        Returns
        -------
        None
        """
        if self.dataStructure is not None:
            self.dataStructure.setIsUsed()


class AttributeStructure(Structure):
    """
    A class for XML attribute structures.

    Attributes
    ----------
    attributeName : string
        the name of this XML attribute
    dataStructureReference : string
        the reference of the data structure that should be used as the value of
        this attribute
    dataStructure : DataStructure
        the data structure that should be used as the value of this attribute
    defaultValue : any
        the default value of this attribute; overrides the default value set by
        the data structure
    """

    def __init__(self, reference=""):
        super().__init__(reference)

        self.attributeName = ""
        self.dataStructureReference = ""
        self.defaultValue = None

    @property
    def dataStructure(self):
        return self.schema.getStructureByReference(self.dataStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for this structure and the structures it
        depends on.

        Returns
        -------
        None
        """
        super().setIsUsed()

        if self.dataStructure is not None:
            self.dataStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "AttributeStructure",
            "baseStructureReference": self.baseStructureReference,
            "reference": self.reference,
            "isUsed": self.isUsed,
            "metadata": self.metadata.toJSON(),
            "attributeName": self.attributeName,
            "dataStructureReference": self.dataStructureReference,
            "defaultValue": self.defaultValue
        }


class ElementStructure(Structure):
    """
    A class for XML element structures.

    Attributes
    ----------
    elementName : string
        the tag name of this element
    canBeRootElement : boolean
        whether or not this element can be the root element of the XML document
    attributes : list
        a list of attribute usage references that specify what attributes this
        element can have
    allowedContent : a type of UsageReference or StructureList
        a hierarchical collection of objects defining what elements and data
        this element can contain
    valueTypeReference : string
        if the content of this element is simply a value - i.e., a data
        structure - then this is the reference to that data structure
    valueType : DataStructure
        if the content of this element is simply a value - i.e., a data
        structure - then this is that data structure
    isSelfClosing : boolean
        whether or not this element should be self-closing; XSD is indifferent
        to this; mainly used in the auto-generation of documentation
    lineBreaks : list of integers
        how many line breaks there should be before the opening tag, after the
        opening tag, before the closing tag, and after the closing tag; mainly
        used in the auto-generation of documentation

    The remaining properties (hasAttributes, hasContent,
    containsElementUsageReference, containsAnyTextUsageReference,
    contentIsAnyText, contentIsSingleValue, contentIsElementsOnly,
    contentIsElementsAndAnyText) are derived conveniences mainly used by the
    Schemata exporters.
    """

    def __init__(self, reference=""):
        super().__init__(reference)

        self.elementName = ""
        self.canBeRootElement = False
        self.attributes = []
        self.allowedContent = None
        self.valueTypeReference = ""
        self.isSelfClosing = False
        self.lineBreaks = [0, 1, 1, 1]

    @property
    def hasAttributes(self):
        return len(self.attributes) > 0

    @property
    def hasContent(self):
        if self.allowedContent is None:
            return False

        # An empty structure list counts as 'no content'.
        if isinstance(self.allowedContent, StructureList) and len(self.allowedContent.structures) == 0:
            return False

        return True

    @property
    def containsElementUsageReference(self):
        if isinstance(self.allowedContent, ElementUsageReference):
            return True

        if isinstance(self.allowedContent, AnyElementsUsageReference):
            return True

        if isinstance(self.allowedContent, StructureList):
            # To do: this needs to be recursive. A nested StructureList is
            # currently assumed to contain element usage references.
            if len([structure for structure in self.allowedContent.structures if isinstance(structure, ElementUsageReference) or isinstance(structure, AnyElementsUsageReference) or isinstance(structure, StructureList)]) > 0:
                return True

        return False

    @property
    def containsAnyTextUsageReference(self):
        if isinstance(self.allowedContent, AnyTextUsageReference):
            return True

        if isinstance(self.allowedContent, StructureList):
            if len([structure for structure in self.allowedContent.structures if isinstance(structure, AnyTextUsageReference)]) > 0:
                return True

        return False

    @property
    def contentIsAnyText(self):
        return self.containsAnyTextUsageReference and not self.containsElementUsageReference

    @property
    def contentIsSingleValue(self):
        return isinstance(self.allowedContent, DataUsageReference)

    @property
    def contentIsElementsOnly(self):
        return self.containsElementUsageReference and not self.containsAnyTextUsageReference

    @property
    def contentIsElementsAndAnyText(self):
        return self.containsElementUsageReference and self.containsAnyTextUsageReference

    @property
    def valueType(self):
        return self.schema.getStructureByReference(self.valueTypeReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for this structure and the structures it
        depends on.

        Returns
        -------
        None
        """
        super().setIsUsed()

        for attributeUsageReference in self.attributes:
            logger.info(f"Setting isUsed for {self.reference}.{attributeUsageReference.attributeStructureReference}.")
            # Delegate to the usage reference, which guards against the
            # attribute structure not being resolvable (previously an
            # unresolvable reference caused an AttributeError on None here).
            attributeUsageReference.setIsUsed()

        if self.allowedContent is not None:
            self.allowedContent.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "ElementStructure",
            "baseStructureReference": self.baseStructureReference,
            "reference": self.reference,
            "isUsed": self.isUsed,
            "metadata": self.metadata.toJSON(),
            "elementName": self.elementName,
            "canBeRootElement": self.canBeRootElement,
            "attributes": [a.toJSON() for a in self.attributes],
            "allowedContent": None if self.allowedContent is None else self.allowedContent.toJSON(),
            "isSelfClosing": self.isSelfClosing,
            "lineBreaks": self.lineBreaks
        }


class PropertyStructure(Structure):
    """
    A class for JSON property structures.

    Attributes
    ----------
    propertyName : string
        the name of this JSON property
    valueTypeReference : string
        the reference of the structure that the value of this property should
        be
    valueType : Structure
        the structure that the value of this property should be; can be a data
        structure, an array structure, or an object structure
    """

    def __init__(self, reference=""):
        super().__init__(reference)

        self.propertyName = ""
        self.valueTypeReference = ""

    @property
    def valueType(self):
        return self.schema.getStructureByReference(self.valueTypeReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for this structure and the structures it
        depends on.

        Returns
        -------
        None
        """
        super().setIsUsed()

        if self.valueType is not None:
            self.valueType.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "PropertyStructure",
            "baseStructureReference": self.baseStructureReference,
            "reference": self.reference,
            "isUsed": self.isUsed,
            "metadata": self.metadata.toJSON(),
            "valueTypeReference": self.valueTypeReference
        }


class ArrayStructure(Structure):
    """
    A class for JSON array structures.

    Attributes
    ----------
    itemTypeReference : string
        the reference to the structure that all of the items of this array
        conform to
    itemType : Structure
        the structure that all of the items of this array conform to; can be a
        data structure, an array structure, or an object structure
    """

    def __init__(self, reference=""):
        super().__init__(reference)

        self.itemTypeReference = ""

    @property
    def itemType(self):
        return self.schema.getStructureByReference(self.itemTypeReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for this structure and the structures it
        depends on.

        Returns
        -------
        None
        """
        super().setIsUsed()

        if self.itemType is not None:
            self.itemType.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "ArrayStructure",
            "baseStructureReference": self.baseStructureReference,
            "reference": self.reference,
            "isUsed": self.isUsed,
            "metadata": self.metadata.toJSON(),
            "itemTypeReference": self.itemTypeReference
        }


class ObjectStructure(Structure):
    """
    A class for JSON object structures.

    Attributes
    ----------
    canBeRootObject : boolean
        whether or not this object structure can be the root object of the JSON
        document
    properties : list
        a list of property usage references that defines the properties this
        object can have
    """

    def __init__(self, reference=""):
        super().__init__(reference)

        self.canBeRootObject = False
        self.properties = []

    def setIsUsed(self):
        """
        Sets the isUsed property for this structure and the structures it
        depends on.

        Returns
        -------
        None
        """
        super().setIsUsed()

        for propertyUsageReference in self.properties:
            propertyUsageReference.propertyStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "ObjectStructure",
            "baseStructureReference": self.baseStructureReference,
            "reference": self.reference,
            "isUsed": self.isUsed,
            "metadata": self.metadata.toJSON(),
            "canBeRootObject": self.canBeRootObject,
            "properties": [p.toJSON() for p in self.properties]
        }


class UsageReference(object):
    """
    A usage reference base class. While structures *define* things like
    elements or objects, usage references are statements of how and where they
    are used. A usage reference includes things like whether an attribute is
    optional, or how many times a subelement should be repeated.

    Attributes
    ----------
    schema : Schema
        the schema that this usage reference is used in.
    """

    def __init__(self):
        self.schema = None

    def setIsUsed(self):
        pass

    def toJSON(self):
        return {}


class DataUsageReference(UsageReference):
    """
    A class for a data structure usage reference.

    Attributes
    ----------
    dataStructureReference : string
        the reference of the data structure that this usage reference pertains
        to
    dataStructure : DataStructure
        the data structure that this usage reference pertains to
    """

    def __init__(self):
        super().__init__()

        self.dataStructureReference = ""

    @property
    def dataStructure(self):
        return self.schema.getStructureByReference(self.dataStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for the structures this usage reference
        depends on.

        Returns
        -------
        None
        """
        if self.dataStructure is not None:
            self.dataStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "dataStructureReference": self.dataStructureReference
        }


class AttributeUsageReference(UsageReference):
    """
    A class for an attribute structure usage reference.

    Attributes
    ----------
    attributeStructureReference : string
        the reference of the attribute structure this usage reference pertains
        to
    attributeStructure : AttributeStructure
        the attribute structure this usage reference pertains to
    isOptional : boolean
        whether or not this attribute is optional in this context
    defaultValue : any
        the default value of this attribute in this context; overrides the
        default value set by the attribute structure and the default value set
        by the value data structure
    """

    def __init__(self):
        super().__init__()

        self.attributeStructureReference = ""
        self.isOptional = False
        self.defaultValue = None

    @property
    def attributeStructure(self):
        return self.schema.getStructureByReference(self.attributeStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for the structures this usage reference
        depends on.

        Returns
        -------
        None
        """
        if self.attributeStructure is not None:
            self.attributeStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "attributeStructureReference": self.attributeStructureReference,
            "isOptional": self.isOptional,
            "defaultValue": self.defaultValue
        }


class ElementUsageReference(UsageReference):
    """
    A class for an element structure usage reference.

    Attributes
    ----------
    elementStructureReference : string
        the reference of the element structure that this usage reference
        pertains to
    elementStructure : ElementStructure
        the element structure that this usage reference pertains to
    nExpression : array of tuples
        an array of tuples defining the limits of how many repeats of this
        element there should be
    minimumNumberOfOccurrences : integer
        the minimum number of occurrences (inclusive) that there must be of
        this element
    maximumNumberOfOccurrences : integer
        the maximum number of occurrences (inclusive) that there must be of
        this element
    """

    def __init__(self):
        super().__init__()

        self.elementStructureReference = ""
        self.nExpression = None
        self.minimumNumberOfOccurrences = 1
        self.maximumNumberOfOccurrences = 1

    @property
    def elementStructure(self):
        return self.schema.getStructureByReference(self.elementStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for the structures this usage reference
        depends on.

        Returns
        -------
        None
        """
        if self.elementStructure is not None:
            self.elementStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "elementStructureReference": self.elementStructureReference,
            "minimumNumberOfOccurrences": self.minimumNumberOfOccurrences,
            "maximumNumberOfOccurrences": self.maximumNumberOfOccurrences
        }


class PropertyUsageReference(UsageReference):
    """
    A class for a property structure usage reference.

    Attributes
    ----------
    propertyStructureReference : string
        the reference of the property structure that this usage reference
        pertains to
    propertyStructure : PropertyStructure
        the property structure that this usage reference pertains to
    isOptional : boolean
        whether this property is optional in this context
    defaultValue : any
        the default value of this property in this context
    """

    def __init__(self):
        super().__init__()

        self.propertyStructureReference = ""
        self.isOptional = False
        self.defaultValue = None

    @property
    def propertyStructure(self):
        return self.schema.getStructureByReference(self.propertyStructureReference)

    def setIsUsed(self):
        """
        Sets the isUsed property for the structures this usage reference
        depends on.

        Returns
        -------
        None
        """
        if self.propertyStructure is not None:
            self.propertyStructure.setIsUsed()

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "propertyStructureReference": self.propertyStructureReference,
            "isOptional": self.isOptional,
            "defaultValue": self.defaultValue
        }


class AnyAttributesUsageReference(UsageReference):
    """
    A class for a wildcard usage reference that indicates that any attributes
    can be attached to an element.
    """
    pass


class AnyElementsUsageReference(UsageReference):
    """
    A class for a wildcard usage reference that indicates that any element can
    be a subelement of an element.
    """
    pass


class AnyTextUsageReference(UsageReference):
    """
    A class for a wildcard usage reference that indicates any text can be
    contained within an element.
    """
    pass


class AnyPropertiesUsageReference(UsageReference):
    """
    A class for a wildcard usage reference that indicates that any properties
    can be attached to an object.
    """
    pass


class StructureList(object):
    """
    Represents a list of structure usage references. This is used in the
    allowedContent property of the ElementStructure class. This is the base
    class. Derived classes specify whether the structures must appear in
    order, or whether the list represents a choice between structures.

    Attributes
    ----------
    schema : Schema
        the schema that this structure list is in
    structures : list
        the structure usage references in this list
    containsText : boolean
        whether this structure list contains an AnyTextUsageReference, at any
        level of depth
    """

    def __init__(self):
        self.schema = None
        self.structures = []

    @property
    def containsText(self):
        for structureUsageReference in self.structures:
            if isinstance(structureUsageReference, AnyTextUsageReference):
                return True

            # Recurse into nested lists / choices. containsText is a property,
            # so it must be accessed, not called (the original code invoked it
            # as a method, which raised a TypeError on nested lists). A single
            # isinstance check on the base class covers UnorderedStructureList,
            # OrderedStructureList, and StructureChoice.
            if isinstance(structureUsageReference, StructureList):
                if structureUsageReference.containsText:
                    return True

        return False

    def setIsUsed(self):
        """
        Sets the isUsed property for the structures this list depends on.

        Returns
        -------
        None
        """
        for structureUsageReference in self.structures:
            structureUsageReference.setIsUsed()

    def toJSON(self):
        return {}


class UnorderedStructureList(StructureList):
    """
    Represents a type of structure list where the order is not important.
    """

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "UnorderedStructureList",
            "structures": [structure.toJSON() for structure in self.structures]
        }


class OrderedStructureList(StructureList):
    """
    Represents a type of structure list where the order is important.
    """

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "OrderedStructureList",
            "structures": [structure.toJSON() for structure in self.structures]
        }


class StructureChoice(StructureList):
    """
    Represents a type of structure list where only one of the structures can be
    used.
    """

    def toJSON(self):
        """
        Converts this object to a dictionary, which can then be easily exported
        as JSON. Used for development purposes.

        Returns
        -------
        A dictionary representing this object.
        """
        return {
            "type": "StructureChoice",
            "structures": [structure.toJSON() for structure in self.structures]
        }
/schematacode-1.0.1.tar.gz/schematacode-1.0.1/schemata/structures.py
0.909902
0.677607
structures.py
pypi
import os
import logging
import re
import json

from lxml.etree import ElementTree as XMLElementTree, Element as XMLElement, SubElement as XMLSubelement, Comment as XMLComment, QName, indent

from schemata.structures import *

logger = logging.getLogger("schemata.parser")

"""
This module contains the Schemata parser.

This module contains the code that takes Schemata syntax - a relatively compact
syntax - and turns it into a set of objects representing the schema. From there,
the schema can be exported to XSD, or Schematron, or JSON Schemas, or
auto-generated documentation, depending on what's relevant.

The parsing is done completely in one step. The parser scans the document and
constructs the objects in one go.
"""


class Marker(object):
    """A cursor into the input text; tracks the current parse position."""

    def __init__(self):
        self.position = 0

    def copy(self):
        """Return an independent copy of this marker (used for backtracking)."""
        marker = Marker()
        marker.position = self.position
        return marker


def cut(text, startIndex, length=1):
    """Return the substring of *text* of *length* characters starting at *startIndex*.

    Slicing never raises, so this safely returns a short/empty string at the
    end of the input.
    """
    a = startIndex
    b = startIndex + length
    return text[a:b]


class SchemataParsingError(Exception):
    """Raised when the input text does not conform to Schemata syntax."""

    def __init__(self, message):
        super().__init__(message)


class Parser(object):
    """Parses Schemata syntax into a Schema object graph."""

    # Characters permitted in property names and references.
    _propertyNameCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-"
    _referenceCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-"
    _operators = ["=", ">", ">=", "<", "<=", "/="]
    _negatedOperators = ["=", "<", "<=", ">", ">=", "/="]
    # The full set of property names Schemata recognises inside a structure body.
    _propertyNames = [
        "baseType",
        "tagName",
        "allowedPattern",
        "allowedValues",
        "minimumValue",
        "maximumValue",
        "defaultValue",
        "valueType",
        "attributes",
        "allowedContent",
        "itemType",
        "properties",
        "isSelfClosing",
        "lineBreaks"
    ]

    def __init__(self):
        pass

    def parseSchemaFromFile(self, filePath):
        """Read the file at *filePath* and parse it as a Schemata schema."""
        with open(filePath, "r") as fileObject:
            text = fileObject.read()
            schema = self.parseSchema(text, filePath)
            return schema

    def parseSchema(self, inputText, filePath=""):
        """
        Parse *inputText* (Schemata syntax) and return a Schema object.

        Parameters
        ----------
        inputText : str
            The text being parsed
        filePath : str
            Path of the file the text came from; used to resolve relative
            import statements

        Raises
        ------
        SchemataParsingError
            If the text is not valid Schemata syntax.
        Exception
            If an imported schema file does not exist.
        """
        logger.debug("Attempting to parse schema.")

        marker = Marker()
        schema = Schema()

        self._parseWhiteSpace(inputText, marker)

        # A leading comment may carry schema-level metadata.
        logger.debug("Attempting to parse schema metadata.")
        metadata = self._parseComment(inputText, marker)

        if metadata != None:
            logger.debug("Found metadata comment.")
            m = re.search(r"Format Name:\s*(.+)\n", metadata)
            if m != None:
                schema.formatName = m.group(1).strip()
                logger.debug(f"Got format name '{schema.formatName}'.")

        # Resolve import statements relative to this schema file's directory.
        importStatements = self._parseImportStatements(inputText, marker, schema)

        for i in importStatements:
            path = os.path.join(os.path.dirname(filePath), i)
            if not os.path.exists(path):
                raise Exception(f"'{path}' does not exist.")
            s = self.parseSchemaFromFile(path)
            schema.dependencies.append(s)

        schema.structures = self._parseStructures(inputText, marker, schema)

        # Element usage references and data usage references are syntactically
        # identical; now that all structures are known, convert element usage
        # references that actually point at data structures.
        for structure in schema.structures:
            if isinstance(structure, ElementStructure) and isinstance(structure.allowedContent, ElementUsageReference):
                s = structure.allowedContent.elementStructure
                if isinstance(s, DataStructure):
                    structure.valueTypeReference = s.reference
                    ds = DataUsageReference()
                    ds.schema = schema
                    ds.dataStructureReference = s.reference
                    structure.allowedContent = ds

        # Expand list() functions into generated list data structures.
        listDataStructures = []

        for structure in schema.structures:
            if isinstance(structure, AttributeStructure) and isinstance(structure.dataStructureReference, ListFunction):
                lf = structure.dataStructureReference
                ds1 = lf.dataStructure
                ds1.setIsUsed()

                pattern = ""
                if ds1.allowedValues:
                    pattern = "|".join(ds1.allowedValues)
                elif ds1.allowedPattern != "":
                    pattern = ds1.allowedPattern

                ds2 = DataStructure()
                ds2.schema = schema
                ds2.reference = f"list_of__{ds1.reference}"
                ds2.baseStructureReference = "string"
                # Raw string so the regex escapes (\s) survive intact.
                ds2.allowedPattern = r"({})(\s*{}\s*({}))*".format(pattern, lf.separator, pattern)

                logger.debug("Creating list structure '{}'.".format(ds2.reference))

                structure.dataStructureReference = ds2.reference
                listDataStructures.append(ds2)

        logger.debug(f"Created {len(listDataStructures)} list data structures.")

        schema.structures += listDataStructures
        schema.setIsUsed()

        logger.debug("Schema structures: {}".format(", ".join([str(structure) for structure in schema.structures])))

        return schema
_parseImportStatements(self, inputText, marker, schema = None): logger.debug("Getting import statements.") self._parseWhiteSpace(inputText, marker) importStatements = [] while marker.position < len(inputText): i = self._parseImportStatement(inputText, marker, schema) if i != None: importStatements.append(i) else: break logger.debug(f"{len(importStatements)} import statements found.") return importStatements def _parseImportStatement(self, inputText, marker, schema = None): logger.debug("Attempting to parse import statement.") self._parseWhiteSpace(inputText, marker) if cut(inputText, marker.position, 6) == "import": marker.position += 6 self._parseWhiteSpace(inputText, marker) path = self._parseString(inputText, marker) if path == None or path == "": raise SchemataParsingError(f"Expected a path string at {marker.position}.") logger.debug(f"Found path '{path}'.") return path return None def _parseStructures(self, inputText, marker, schema = None): logger.debug("Attempting to parse structures.") structures = [] references = [] while marker.position < len(inputText): self._parseWhiteSpace(inputText, marker) self._parseComment(inputText, marker) structure = self._parseStructure(inputText, marker, schema) if structure != None: if structure.reference in references: raise SchemataParsingError(f"A structure with the reference '{structure.reference}' has already been defined.") structures.append(structure) references.append(structure.reference) logger.debug(f"Found {len(structures)} structures.") return structures def _parseStructure(self, inputText, marker, schema = None): logger.debug("Attempting to parse structure.") self._parseWhiteSpace(inputText, marker) if cut(inputText, marker.position, 8) == "dataType": marker.position += 8 logger.debug("Found data structure.") dataStructure = self._parseDataStructure(inputText, marker, schema) return dataStructure if cut(inputText, marker.position, 9) == "attribute": marker.position += 9 logger.debug("Found attribute structure.") 
attributeStructure = self._parseAttributeStructure(inputText, marker, schema) return attributeStructure if cut(inputText, marker.position, 4) == "root": marker.position += 4 self._parseWhiteSpace(inputText, marker) if cut(inputText, marker.position, 7) == "element": marker.position += 7 logger.debug("Found root element structure.") elementStructure = self._parseElementStructure(inputText, marker, schema) elementStructure.canBeRootElement = True return elementStructure elif cut(inputText, marker.position, 6) == "object": marker.position += 6 logger.debug("Found root object structure.") objectStructure = self._parseObjectStructure(inputText, marker, schema) objectStructure.canBeRootObject = True return objectStructure else: raise SchemataParsingError(f"Expected 'element' keyword at position {marker.position}.") if cut(inputText, marker.position, 7) == "element": marker.position += 7 logger.debug("Found element structure.") elementStructure = self._parseElementStructure(inputText, marker, schema) return elementStructure if cut(inputText, marker.position, 8) == "property": marker.position += 8 logger.debug("Found property structure.") propertyStructure = self._parsePropertyStructure(inputText, marker, schema) return propertyStructure if cut(inputText, marker.position, 5) == "array": marker.position += 5 logger.debug("Found array structure.") arrayStructure = self._parseArrayStructure(inputText, marker, schema) return arrayStructure if cut(inputText, marker.position, 6) == "object": marker.position += 6 logger.debug("Found object structure.") objectStructure = self._parseObjectStructure(inputText, marker, schema) return objectStructure return None def _parseDataStructure(self, inputText, marker, schema = None): """ Gets any data structure at the current position and returns it. 
Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ dataStructure = DataStructure() dataStructure.schema = schema # Get the reference. self._parseWhiteSpace(inputText, marker) reference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) dataStructure.reference = reference # Expect the opening bracket. if cut(inputText, marker.position, 1) == "{": marker.position += 1 else: raise SchemataParsingError("Expected '{{' at position {}.".format(marker.position)) self._parseWhiteSpace(inputText, marker) # Get the metadata. metadata = self._parseComment(inputText, marker) if metadata != None: m1 = re.search(r"Description:\s*(.+)\n", metadata) m2 = re.search(r"Example Value:\s*(.+)\n", metadata) if m1 != None: dataStructure.metadata.description = m1.group(1).strip() if m2 != None: dataStructure.metadata.exampleValue = m2.group(1).strip() self._parseWhiteSpace(inputText, marker) # Step through the text looking for properties. while marker.position < len(inputText): p = self._parseProperty(inputText, marker, schema) if p == None: break else: if p[0] == "baseType": dataStructure.baseStructureReference = p[1] if p[0] == "allowedPattern": dataStructure.allowedPattern = p[1] if p[0] == "allowedValues": dataStructure.allowedValues = p[1] if p[0] == "minimumValue": dataStructure.minimumValue = p[1] if p[0] == "maximumValue": dataStructure.maximumValue = p[1] self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. if cut(inputText, marker.position, 1) == "}": marker.position += 1 else: raise SchemataParsingError("Expected '}}' at position {}.".format(marker.position)) return dataStructure def _parseAttributeStructure(self, inputText, marker, schema = None): """ Gets any attribute structure at the current position and returns it. 
Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ attributeStructure = AttributeStructure() attributeStructure.schema = schema # Get the reference. self._parseWhiteSpace(inputText, marker) reference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) attributeStructure.reference = reference # Expect the opening bracket. if cut(inputText, marker.position, 1) == "{": marker.position += 1 else: raise SchemataParsingError("Expected '{{' at position {}.".format(marker.position)) self._parseWhiteSpace(inputText, marker) # Get the metadata. metadata = self._parseComment(inputText, marker) if metadata != None: m1 = re.search(r"Description:\s*(.+)\n", metadata) m2 = re.search(r"Example Value:\s*(.+)\n", metadata) if m1 != None: attributeStructure.metadata.description = m1.group(1).strip() if m2 != None: attributeStructure.metadata.exampleValue = m2.group(1).strip() self._parseWhiteSpace(inputText, marker) # Step through the text looking for properties. while marker.position < len(inputText): p = self._parseProperty(inputText, marker, schema) if p == None: break else: if p[0] == "baseType": attributeStructure.baseStructureReference = p[1] if p[0] == "tagName": attributeStructure.attributeName = p[1] if p[0] == "valueType": attributeStructure.dataStructureReference = p[1] if p[0] == "defaultValue": attributeStructure.defaultValue = p[1] self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. if cut(inputText, marker.position, 1) == "}": marker.position += 1 else: raise SchemataParsingError("Expected '}}' at position {}.".format(marker.position)) # If the attribute name has not been set explicitly, use the attribute structure reference. # This allows .schema files to be terse. 
if attributeStructure.attributeName == "": attributeStructure.attributeName = attributeStructure.reference return attributeStructure def _parseElementStructure(self, inputText, marker, schema = None): """ Gets any element structure at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ elementStructure = ElementStructure() elementStructure.schema = schema # Get the reference. self._parseWhiteSpace(inputText, marker) reference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) elementStructure.reference = reference # Expect the opening bracket. if cut(inputText, marker.position, 1) == "{": marker.position += 1 else: raise SchemataParsingError("Expected '{{' at position {}.".format(marker.position)) self._parseWhiteSpace(inputText, marker) # Get the metadata. metadata = self._parseComment(inputText, marker) if metadata != None: m1 = re.search(r"Description:\s*(.+)\n", metadata) m2 = re.search(r"Example Value:\s*(.+)\n", metadata) if m1 != None: elementStructure.metadata.description = m1.group(1).strip() if m2 != None: elementStructure.metadata.exampleValue = m2.group(1).strip() self._parseWhiteSpace(inputText, marker) # Step through the text looking for properties. while marker.position < len(inputText): p = self._parseProperty(inputText, marker, schema) if p == None: break else: if p[0] == "baseType": elementStructure.baseStructureReference = p[1] if p[0] == "tagName": elementStructure.elementName = p[1] if p[0] == "attributes": elementStructure.attributes = p[1] if p[0] == "allowedContent": elementStructure.allowedContent = p[1] if p[0] == "isSelfClosing": elementStructure.isSelfClosing = p[1] if p[0] == "lineBreaks": elementStructure.lineBreaks = p[1] self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. 
if cut(inputText, marker.position, 1) == "}": marker.position += 1 else: raise SchemataParsingError("Expected '}}' at position {}.".format(marker.position)) # If the element name has not been set explicitly, use the element structure reference. # This allows .schema files to be terse. if elementStructure.elementName == "": elementStructure.elementName = elementStructure.reference return elementStructure def _parsePropertyStructure(self, inputText, marker, schema = None): """ Gets any property structure at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ propertyStructure = PropertyStructure() propertyStructure.schema = schema # Get the reference. self._parseWhiteSpace(inputText, marker) reference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) propertyStructure.reference = reference # Expect the opening bracket. if cut(inputText, marker.position, 1) == "{": marker.position += 1 else: raise SchemataParsingError("Expected '{{' at position {}.".format(marker.position)) self._parseWhiteSpace(inputText, marker) # Get the metadata. metadata = self._parseComment(inputText, marker) if metadata != None: m1 = re.search(r"Description:\s*(.+)\n", metadata) m2 = re.search(r"Example Value:\s*(.+)\n", metadata) if m1 != None: propertyStructure.metadata.description = m1.group(1).strip() if m2 != None: propertyStructure.metadata.exampleValue = m2.group(1).strip() self._parseWhiteSpace(inputText, marker) # Step through the text looking for properties. while marker.position < len(inputText): p = self._parseProperty(inputText, marker, schema) if p == None: break else: if p[0] == "tagName": propertyStructure.propertyName = p[1] if p[0] == "valueType": propertyStructure.valueTypeReference = p[1] self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. 
if cut(inputText, marker.position, 1) == "}": marker.position += 1 else: raise SchemataParsingError("Expected '}}' at position {}.".format(marker.position)) # If the property name has not been set explicitly, use the property structure reference. # This allows .schema files to be terse. if propertyStructure.propertyName == "": propertyStructure.propertyName = propertyStructure.reference return propertyStructure def _parseArrayStructure(self, inputText, marker, schema = None): """ Gets any array structure at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ arrayStructure = ArrayStructure() arrayStructure.schema = schema # Get the reference. self._parseWhiteSpace(inputText, marker) reference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) arrayStructure.reference = reference # Expect the opening bracket. if cut(inputText, marker.position, 1) == "{": marker.position += 1 else: raise SchemataParsingError("Expected '{{' at position {}.".format(marker.position)) self._parseWhiteSpace(inputText, marker) # Get the metadata. metadata = self._parseComment(inputText, marker) if metadata != None: m1 = re.search(r"Description:\s*(.+)\n", metadata) m2 = re.search(r"Example Value:\s*(.+)\n", metadata) if m1 != None: arrayStructure.metadata.description = m1.group(1).strip() if m2 != None: arrayStructure.metadata.exampleValue = m2.group(1).strip() self._parseWhiteSpace(inputText, marker) # Step through the text looking for properties. while marker.position < len(inputText): p = self._parseProperty(inputText, marker, schema) if p == None: break else: if p[0] == "baseType": arrayStructure.baseStructureReference = p[1] if p[0] == "itemType": arrayStructure.itemTypeReference = p[1] self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. 
if cut(inputText, marker.position, 1) == "}": marker.position += 1 else: raise SchemataParsingError("Expected '}}' at position {}.".format(marker.position)) return arrayStructure def _parseObjectStructure(self, inputText, marker, schema = None): """ Gets any object structure at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ objectStructure = ObjectStructure() objectStructure.schema = schema # Get the reference. self._parseWhiteSpace(inputText, marker) reference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) objectStructure.reference = reference # Expect the opening bracket. if cut(inputText, marker.position, 1) == "{": marker.position += 1 else: raise SchemataParsingError("Expected '{{' at position {}.".format(marker.position)) self._parseWhiteSpace(inputText, marker) # Get the metadata. metadata = self._parseComment(inputText, marker) if metadata != None: m1 = re.search(r"Description:\s*(.+)\n", metadata) m2 = re.search(r"Example Value:\s*(.+)\n", metadata) if m1 != None: objectStructure.metadata.description = m1.group(1).strip() if m2 != None: objectStructure.metadata.exampleValue = m2.group(1).strip() self._parseWhiteSpace(inputText, marker) # Step through the text looking for properties. while marker.position < len(inputText): p = self._parseProperty(inputText, marker, schema) if p == None: break else: if p[0] == "baseType": objectStructure.baseStructureReference = p[1] if p[0] == "properties": objectStructure.properties = p[1] self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. 
if cut(inputText, marker.position, 1) == "}": marker.position += 1 else: raise SchemataParsingError("Expected '}}' at position {}.".format(marker.position)) return objectStructure def _parseProperty(self, inputText, marker, schema = None): """ Gets any property at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ logger.debug("Attempting to parse structure property.") self._parseWhiteSpace(inputText, marker) # Get the property name. propertyName = self._parsePropertyName(inputText, marker) # If there is no property name, there is no property, so return None. if propertyName == None: return None logger.debug(f"Found property name '{propertyName}'.") # If the property name is not a Schemata property name, raise an exception. if propertyName not in Parser._propertyNames: raise SchemataParsingError(f"'{propertyName}' is not a valid Schemata property name.") self._parseWhiteSpace(inputText, marker) # A colon must follow for it to be a property. if cut(inputText, marker.position) == ":": marker.position += 1 else: return None self._parseWhiteSpace(inputText, marker) propertyValue = None # Get the property value. If the type of the property value is wrong, raise an exception. 
if propertyName == "baseType": propertyValue = self._parseReference(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a reference for property '{propertyName}'.") if propertyName == "tagName": propertyValue = self._parseString(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a string for property '{propertyName}'.") if propertyName == "allowedPattern": propertyValue = self._parseString(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a string for property '{propertyName}'.") if propertyName == "allowedValues": propertyValue = self._parseList(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a list of values for property '{propertyName}'.") if propertyName == "minimumValue": propertyValue = self._parseInteger(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected an integer for property '{propertyName}'.") if propertyName == "maximumValue": propertyValue = self._parseInteger(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected an integer for property '{propertyName}'.") if propertyName == "defaultValue": propertyValue = self._parseString(inputText, marker) if propertyValue == None: propertyValue = self._parseInteger(inputText, marker) if propertyValue == None: propertyValue = self._parseBoolean(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a string, integer, or boolean for property '{propertyName}'.") if propertyName == "valueType": propertyValue = self._parseListFunction(inputText, marker, schema) if propertyValue == None: propertyValue = self._parseReference(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a reference for property '{propertyName}'.") if propertyName == "itemType": propertyValue = self._parseReference(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a 
reference for property '{propertyName}'.") if propertyName == "attributes": propertyValue = self._parseList(inputText, marker, "attributeUsageReference", schema) if propertyValue == None: raise SchemataParsingError(f"Expected an attribute usage reference list for property '{propertyName}'.") if propertyName == "properties": propertyValue = self._parseList(inputText, marker, "propertyUsageReference", schema) if propertyValue == None: raise SchemataParsingError(f"Expected a property usage reference list for property '{propertyName}'.") if propertyName == "allowedContent": propertyValue = self._parseSubelementUsages(inputText, marker, schema) if propertyValue == None: raise SchemataParsingError(f"Expected a structure usage reference or structure list for property '{propertyName}'.") if propertyName == "isSelfClosing": propertyValue = self._parseBoolean(inputText, marker) if propertyValue == None: raise SchemataParsingError(f"Expected a boolean for property '{propertyName}'.") if propertyName == "lineBreaks": propertyValue = self._parseList(inputText, marker, "integer") if propertyValue == None: raise SchemataParsingError(f"Expected a list of integers for property '{propertyName}'.") if propertyValue == None: raise SchemataParsingError(f"Expected a value for property '{propertyName}'.") self._parseWhiteSpace(inputText, marker) logger.debug("Found property value '{}'.".format(propertyValue)) # Expect semi-colon. if cut(inputText, marker.position) == ";": marker.position += 1 else: raise SchemataParsingError(f"Expected ';' at position {marker.position}.") return (propertyName, propertyValue) def _parseListFunction(self, inputText, marker, schema = None): """ Gets any list function at the current position and returns it. List functions can be used to automatically create new data types where the data structure is a list of another data type's enumerations. This prevents the user from having to write complex patterns in the .schema file. 
Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ logger.debug("Attempting to parse list function.") self._parseWhiteSpace(inputText, marker) # List functions must start with 'list'. if cut(inputText, marker.position, 4) == "list": marker.position += 4 else: return None self._parseWhiteSpace(inputText, marker) # Expect the opening bracket. if cut(inputText, marker.position, 1) == "(": marker.position += 1 else: raise SchemataParsingError(f"Expected '(' at position {marker.position}.") self._parseWhiteSpace(inputText, marker) # Expect a reference. reference = self._parseReference(inputText, marker) if reference == None: raise SchemataParsingError(f"Expected a reference at position {marker.position}.") self._parseWhiteSpace(inputText, marker) # Expect a comma. if cut(inputText, marker.position, 1) == ",": marker.position += 1 else: raise SchemataParsingError(f"Expected ',' at position {marker.position}.") self._parseWhiteSpace(inputText, marker) # Expect a string that denotes the separator. separator = self._parseString(inputText, marker) if separator == None: raise SchemataParsingError(f"Expected a string at position {marker.position}.") self._parseWhiteSpace(inputText, marker) # Expect the closing bracket. if cut(inputText, marker.position, 1) == ")": marker.position += 1 else: raise SchemataParsingError(f"Expected ')' at position {marker.position}.") listFunction = ListFunction(reference, separator) listFunction.schema = schema logger.debug(f"Found list function: {listFunction}.") return listFunction def _parseSubelementUsages(self, inputText, marker, schema = None): """ Gets any subelement usage at the current position and returns it. 
A subelement usage can include an element usage reference, a data usage reference, an any elements usage reference, an any text usage reference, an ordered structure list, an unordered structure list, or a structure choice. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ # Look for the different things that can be subelement usages, and return if found. item = self._parseElementUsageReference(inputText, marker, schema) if item != None: return item logger.debug("Didn't find element usage reference.") item = self._parseAnyElementsUsageReference(inputText, marker, schema) if item != None: return item logger.debug("Didn't find any elements usage reference.") item = self._parseAnyTextUsageReference(inputText, marker, schema) if item != None: return item logger.debug("Didn't find any text usage reference.") item = self._parseSubelementList(inputText, marker, schema) if item != None: return item logging.debug("Didn't find subelement list.") return None def _parseSubelementList(self, inputText, marker, schema = None): """ Gets any subelement / substructure list at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ logger.debug("Attempting to parse a subelement list.") # Copy the marker, as we're going to use other functions that will edit the marker. m = marker.copy() self._parseWhiteSpace(inputText, m) self._parseComment(inputText, m) self._parseWhiteSpace(inputText, m) # The bracket type and the separator type determine what kind of list this is. 
bracketType = "" separatorType = "comma" if cut(inputText, m.position) == "{": bracketType = "recurve" m.position += 1 elif cut(inputText, m.position) == "[": bracketType = "square" m.position += 1 else: return None logger.debug(f"Identified bracket type: {bracketType}.") self._parseWhiteSpace(inputText, m) self._parseComment(inputText, m) self._parseWhiteSpace(inputText, m) items = [] n = 0 # Step through the text looking for list items. while m.position < len(inputText): self._parseWhiteSpace(inputText, m) self._parseComment(inputText, m) self._parseWhiteSpace(inputText, m) # There should be a separator character between each list item. if n > 0: c = cut(inputText, m.position) # The first separator used sets up what separator to expect for the rest of the list. if n == 1: if c == ",": separatorType = "comma" m.position += 1 elif c == "/": separatorType = "slash" # Slashes can only be used with recurve brackets. if bracketType == "square": raise SchemataParsingError(f"Expected ',' at position {m.position}.") m.position += 1 elif n > 1: if (separatorType == "comma" and c == ",") or (separatorType == "slash" and c == "/"): m.position += 1 elif (separatorType == "comma" and c == "/") or (separatorType == "slash" and c == ","): # If the separator type is not consistent throughout the list, raise an exception. raise SchemataParsingError(f"Separators must be the same throughout a list (position {m.position}).") else: break self._parseWhiteSpace(inputText, m) self._parseComment(inputText, m) self._parseWhiteSpace(inputText, m) # Try to get an item. item = self._parseSubelementUsages(inputText, m, schema) # If no item is found, break the loop. if item == None: break items.append(item) n += 1 logger.debug(f"Identified separator type: {separatorType}.") logger.debug(f"List: {items}.") self._parseWhiteSpace(inputText, m) self._parseComment(inputText, m) self._parseWhiteSpace(inputText, m) # Check for closing bracket. 
c = cut(inputText, m.position) if bracketType == "recurve" and c == "}": m.position += 1 elif bracketType == "square" and c == "]": m.position += 1 else: # If no closing bracket or the wrong closing bracket is found, raise an exception. raise SchemataParsingError(f"Expected closing bracket at position {m.position}.") # Make the list object. if bracketType == "square" and separatorType == "comma": l = OrderedStructureList() l.schema = schema l.structures = items elif bracketType == "recurve" and separatorType == "comma": l = UnorderedStructureList() l.schema = schema l.structures = items elif bracketType == "recurve" and separatorType == "slash": l = StructureChoice() l.schema = schema l.structures = items else: return None logger.debug(f"Found subelement list {l}.") # Update the original marker. marker.position = m.position return l def _parseAttributeUsageReference(self, inputText, marker, schema = None): """ Gets an attribute usage reference at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ logger.debug("Attempting to parse attribute usage reference.") # First get the reference. self._parseWhiteSpace(inputText, marker) attributeStructureReference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) if attributeStructureReference == None: return None attributeUsageReference = AttributeUsageReference() attributeUsageReference.schema = schema attributeUsageReference.attributeStructureReference = attributeStructureReference # Get the information in brackets, if there is any. if cut(inputText, marker.position) == "(": marker.position += 1 self._parseWhiteSpace(inputText, marker) # Work out if this is an optional attribute. 
if cut(inputText, marker.position, 8) == "optional": marker.position += 8 attributeUsageReference.isOptional = True self._parseWhiteSpace(inputText, marker) # If there's not a closing bracket, raise an exception. if cut(inputText, marker.position) == ")": marker.position += 1 else: raise SchemataParsingError(f"Expected ')' at position {marker.position}.") else: # If there's nothing in the brackets, raise an exception. raise SchemataParsingError(f"Expected keyword at position {marker.position}.") return attributeUsageReference def _parseElementUsageReference(self, inputText, marker, schema = None): """ Gets an element usage reference at the current position and returns it. This will also pick up data usage references, as the two are indistinguishable. Sorting the element usage references from the data usage references is dealt with later in the parsing process. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ logger.debug("Attempting to parse element usage reference.") # First get the reference. self._parseWhiteSpace(inputText, marker) elementStructureReference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) if elementStructureReference == None: return None elementUsageReference = ElementUsageReference() elementUsageReference.schema = schema elementUsageReference.elementStructureReference = elementStructureReference # Get the information in the brackets, if there is any. if cut(inputText, marker.position) == "(": marker.position += 1 self._parseWhiteSpace(inputText, marker) elementUsageReference.minimumNumberOfOccurrences = 0 elementUsageReference.maximumNumberOfOccurrences = -1 nExpression = self._parseNExpression(inputText, marker) if nExpression != None: elementUsageReference.nExpression = nExpression # If there's not a closing bracket, raise an exception. 
if cut(inputText, marker.position) == ")": marker.position += 1 else: raise SchemataParsingError(f"Expected ')' at position {marker.position}.") elif cut(inputText, marker.position, 8) == "optional": marker.position += 8 elementUsageReference.nExpression = [(">=", 0), ("<=", 1)] self._parseWhiteSpace(inputText, marker) # If there's not a closing bracket, raise an exception. if cut(inputText, marker.position) == ")": marker.position += 1 else: raise SchemataParsingError(f"Expected ')' at position {marker.position}.") else: # If there's nothing in the brackets, raise an exception. raise SchemataParsingError(f"Expected expression or keyword at position {marker.position}.") # Apply the n-expression. if elementUsageReference.nExpression != None: for comparison in elementUsageReference.nExpression: if comparison[0] == ">=": elementUsageReference.minimumNumberOfOccurrences = comparison[1] if comparison[0] == ">": elementUsageReference.minimumNumberOfOccurrences = comparison[1] + 1 if comparison[0] == "<=": elementUsageReference.maximumNumberOfOccurrences = comparison[1] if comparison[0] == "<": elementUsageReference.maximumNumberOfOccurrences = comparison[1] - 1 if comparison[0] == "=": elementUsageReference.minimumNumberOfOccurrences = comparison[1] elementUsageReference.maximumNumberOfOccurrences = comparison[1] return elementUsageReference def _parsePropertyUsageReference(self, inputText, marker, schema = None): """ Gets a property usage reference at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ logger.debug("Attempting to parse property usage reference.") # First get the reference. 
self._parseWhiteSpace(inputText, marker) propertyStructureReference = self._parseReference(inputText, marker) self._parseWhiteSpace(inputText, marker) if propertyStructureReference == None: return None propertyUsageReference = PropertyUsageReference() propertyUsageReference.schema = schema propertyUsageReference.propertyStructureReference = propertyStructureReference # Get the information in brackets, if there is any. if cut(inputText, marker.position) == "(": marker.position += 1 self._parseWhiteSpace(inputText, marker) # Work out if this is an optional attribute. if cut(inputText, marker.position, 8) == "optional": marker.position += 8 propertyUsageReference.isOptional = True self._parseWhiteSpace(inputText, marker) # If there's not a closing bracket, raise an exception. if cut(inputText, marker.position) == ")": marker.position += 1 else: raise SchemataParsingError(f"Expected ')' at position {marker.position}.") else: # If there's nothing in the brackets, raise an exception. raise SchemataParsingError(f"Expected keyword at position {marker.position}.") return propertyUsageReference def _parseAnyAttributesUsageReference(self, inputText, marker, schema = None): """ Gets an any attributes usage reference at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ if cut(inputText, marker.position, 16) == "*any attributes*": marker.position += 16 ur = AnyAttributesUsageReference() ur.schema = schema return schema return None def _parseAnyElementsUsageReference(self, inputText, marker, schema = None): """ Gets an any elements usage reference at the current position and returns it. 
Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ if cut(inputText, marker.position, 14) == "*any elements*": marker.position += 14 ur = AnyElementsUsageReference() ur.schema = schema return ur return None def _parseAnyTextUsageReference(self, inputText, marker, schema = None): """ Gets an any text usage reference at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ if cut(inputText, marker.position, 10) == "*any text*": marker.position += 10 ur = AnyTextUsageReference() ur.schema = schema return ur return None def _parseAnyPropertiesUsageReference(self, inputText, marker, schema = None): """ Gets an any properties usage reference at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing schema : Schema The schema object being created """ if cut(inputText, marker.position, 16) == "*any properties*": marker.position += 16 ur = AnyPropertiesUsageReference() ur.schema = schema return schema return None def _parseNExpression(self, inputText, marker): """ Gets any n-expression (an expression of the form 'n > 0' or '0 > n > 3') at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ logger.debug("Attempting to parse n-expression.") # Start by trying to find something of the form '0 <'. 
self._parseWhiteSpace(inputText, marker) n1 = self._parseInteger(inputText, marker) o1 = None self._parseWhiteSpace(inputText, marker) if n1 != None: o1 = self._parseOperator(inputText, marker) # If the expression starts with a number, an operator must follow. if o1 == None: raise SchemataParsingError(f"Expected an operator at position {marker.position}.") self._parseWhiteSpace(inputText, marker) # Check for the variable - 'n'. if cut(inputText, marker.position) == "n": marker.position += 1 else: # If nothing has been found so far, then there is no n-expression, so return None. # If a number and operator have been found, but not an 'n', then the syntax is wrong, so raise an exception. if n1 == None and o1 == None: return None else: raise SchemataParsingError(f"Expected 'n' at position {marker.position}.") # Look for an operator and number after the 'n'. self._parseWhiteSpace(inputText, marker) o2 = self._parseOperator(inputText, marker) if o2 == None: raise SchemataParsingError(f"Expected an operator at position {marker.position}.") self._parseWhiteSpace(inputText, marker) n2 = self._parseInteger(inputText, marker) if n2 == None: raise SchemataParsingError(f"Expected a number at position {marker.position}.") e = [] # Operators before the 'n' must be reversed. if n1 != None and o1 != None: i = self._operators.index(o1) o1b = self._negatedOperators[i] e += [(o1b, n1)] e += [(o2, n2)] return e def _parseList(self, inputText, marker, objectType = "string", schema = None): """ Gets any list (of strings, integers, booleans, et cetera) at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing objectType : str The type of object to expect in the list """ logger.debug("Attempting to parse list.") items = [] n = 0 # Keep trying to find items in the list until you find something that's not a valid list item. 
while marker.position < len(inputText): self._parseWhiteSpace(inputText, marker) # Expect a comma between each of the items in the list. if n > 0: if cut(inputText, marker.position) == ",": marker.position += 1 else: break self._parseWhiteSpace(inputText, marker) item = None # Check to see if the expected object is present. if objectType == "string": item = self._parseString(inputText, marker) if objectType == "integer": item = self._parseInteger(inputText, marker) if objectType == "boolean": item = self._parseBoolean(inputText, marker) if objectType == "attributeUsageReference": item = self._parseAttributeUsageReference(inputText, marker, schema) if objectType == "propertyUsageReference": item = self._parsePropertyUsageReference(inputText, marker, schema) # If an item of the right type is not found, break the loop. if item == None: break items.append(item) n += 1 # If no items were found, no list was found, so return None. if n == 0: return None logger.debug(f"Found list {items}.") return items # Parsing of basic structures starts here. def _parsePropertyName(self, inputText, marker): """ Gets any property name at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ logger.debug("Attempting to parse property name.") t = "" # Step through the text and check if the current character is a valid property name character. while marker.position < len(inputText): c = cut(inputText, marker.position) # If the current character is a valid property name character, add it to the temporary variable. Otherwise, break the loop. if c in Parser._propertyNameCharacters: t += c marker.position += 1 else: break # If no property name was found, return None. if len(t) == 0: return None logger.debug(f"Found property name '{t}'.") return t def _parseReference(self, inputText, marker): """ Gets any reference at the current position and returns it. 
Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ t = "" # Step through the text to see if each character is a valid reference character. while marker.position < len(inputText): c = cut(inputText, marker.position) # If the current character is a valid reference character, add it to the temporary variable. Otherwise, break the loop. if c in Parser._referenceCharacters: t += c marker.position += 1 else: break # If nothing was found, return None. if len(t) == 0: return None logger.debug(f"Found reference '{t}'.") return t def _parseOperator(self, inputText, marker): """ Gets any operator at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ # Sort the operators so that we check for the longest one first. operators = sorted(self._operators, key= lambda o: len(o), reverse=True) # Go through the list of operators. for operator in operators: # Check if the operator is at the current position. if cut(inputText, marker.position, len(operator)) == operator: marker.position += len(operator) return operator # If no operator is found, return None. return None def _parseString(self, inputText, marker): """ Gets any string at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ t = "" quoteMarkType = "" foundClosingQuoteMark = False # Strings in .schema files can start with either single or double quote marks. Check to see if the current character is either. if cut(inputText, marker.position) == "'": quoteMarkType = "single" marker.position += 1 elif cut(inputText, marker.position) == "\"": quoteMarkType = "double" marker.position += 1 else: # If the current character isn't a single or double quote mark, then there is no string, so return None. 
return None # Step through the text and look for the closing quote mark. while marker.position < len(inputText): c = cut(inputText, marker.position) # If the closing quote mark is found, exit the loop. Otherwise, add the character to the temporary variable. if (quoteMarkType == "single" and c == "'") or (quoteMarkType == "double" and c == "\""): marker.position += 1 foundClosingQuoteMark = True break else: t += c marker.position += 1 # If no closing quote mark is found, then the .schema file syntax is wrong, so raise an exception. if not foundClosingQuoteMark: quoteMark = "'" if quoteMarkType == "single" else "\"" raise SchemataParsingError(f"Expected {quoteMark} at position {marker.position}.") return t def _parseInteger(self, inputText, marker): """ Gets any integer at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ t = "" # Step through the text and check if the characters are digits. while marker.position < len(inputText): c = cut(inputText, marker.position) if c in "0123456789": t += c # If the current character is a digit, move the marker along by 1. marker.position += 1 else: break # If no digits are found, return None. if len(t) == 0: return None return int(t) def _parseBoolean(self, inputText, marker): """ Gets any boolean at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ # If either 'true' or 'false' is found, return a boolean value. if cut(inputText, marker.position, 4) == "true": marker.position += 4 return True elif cut(inputText, marker.position, 5) == "false": marker.position += 5 return False # Otherwise return None. return None def _parseComment(self, inputText, marker): """ Gets any comment at the current position and returns it. 
Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ # Check for the opening comment token. if cut(inputText, marker.position, 2) == "/*": marker.position += 2 t = "" foundClosingTag = False # Step through the text and look for the closing comment token. while marker.position < len(inputText): if cut(inputText, marker.position, 2) == "*/": marker.position += 2 foundClosingTag = True break else: # Until the closing comment token is found, add any text to the temporary variable. t += cut(inputText, marker.position) marker.position += 1 # If no closing comment token is found, raise an exception. if not foundClosingTag: raise SchemataParsingError(f"Expected '*/' at position {marker.position}.") return t else: # If no comment is found, return None. return None def _parseWhiteSpace(self, inputText, marker): """ Gets any white space at the current position and returns it. Parameters ---------- inputText : str The text being parsed marker : Marker A marker denoting the position at which to start parsing """ t = "" # Step through the text and check if it is white space. while marker.position < len(inputText): c = cut(inputText, marker.position) if c in " \t\n": t += c # If the current character is white space, move the marker along by 1. marker.position += 1 else: break # If no white space is found, return None. if len(t) == 0: return None return t
/schematacode-1.0.1.tar.gz/schematacode-1.0.1/schemata/parser.py
0.567577
0.254868
parser.py
pypi
import argparse import datetime import re import sys from typing import Generator, List, Optional PYPROJECT_PATH = "pyproject.toml" CHANGELOG_PATH = "docs/changelog.rst" COMPARE_URL_PREFIX = "https://github.com/schemathesis/schemathesis/compare/" def _read_changelog() -> List[str]: with open(CHANGELOG_PATH) as f: return f.readlines() def _find_line_by_prefix(lines: List[str], prefix: str) -> Optional[int]: return next((i for i, line in enumerate(lines) if line.startswith(prefix)), None) def bump(new_version: str) -> None: today = datetime.datetime.now().strftime("%Y-%m-%d") # Read changelog into lines changelog = _read_changelog() # Find the position of the "Unreleased" block unreleased_idx = _find_line_by_prefix(changelog, "`Unreleased`_ -") if unreleased_idx is None: raise RuntimeError("Changelog has no `Unreleased` section") # Place to insert the new release block new_version_idx = unreleased_idx + 3 if changelog[new_version_idx].startswith(".. _v"): raise RuntimeError("New version has no changes") # Insert the new release block before the "Unreleased" block new_version_link = f".. _v{new_version}:\n\n" new_version_line = f"`{new_version}`_ - {today}" new_version_underline = f"\n{'-' * len(new_version_line)}\n\n" changelog.insert(new_version_idx, f"{new_version_link}{new_version_line}{new_version_underline}") # Find the position of the link for the "Unreleased" diff & rewrite it with the new version unreleased_diff_idx = _find_line_by_prefix(changelog, f".. _Unreleased: {COMPARE_URL_PREFIX}") if unreleased_diff_idx is None: raise RuntimeError("Changelog has no diff for the `Unreleased` section") changelog[unreleased_diff_idx] = f".. _Unreleased: {COMPARE_URL_PREFIX}v{new_version}...HEAD\n" # Extract the old version from the next line # `.. _3.18.2: ...` => `3.18.2` old_version_diff_idx = unreleased_diff_idx + 1 old_version = changelog[old_version_diff_idx].split(":")[0][4:] # Insert the diff for the new version new_version_diff = f".. 
_{new_version}: {COMPARE_URL_PREFIX}v{old_version}...v{new_version}\n" changelog.insert(old_version_diff_idx, new_version_diff) # Write the updated changelog back to the file with open(CHANGELOG_PATH, "w") as f: f.writelines(changelog) # Update `pyproject.toml` with open(PYPROJECT_PATH) as f: pyproject = f.readlines() version_idx = _find_line_by_prefix(pyproject, f'version = "{old_version}"') if version_idx is None: raise RuntimeError("`pyproject.toml` has no `version` field") pyproject[version_idx] = f'version = "{new_version}"\n' with open(PYPROJECT_PATH, "w") as f: f.writelines(pyproject) def to_markdown(version: str) -> None: changelog = _read_changelog() # Find the start and end lines for the provided version start_idx = _find_line_by_prefix(changelog, f".. _v{version}") if start_idx is None: raise RuntimeError(f"Changelog misses the {version} version") start_idx += 4 # Skip the version link + version line and its underline end_idx = _find_line_by_prefix(changelog[start_idx + 1 :], ".. 
_v") if end_idx is None: raise RuntimeError("Changelog is missing the previous version") md_lines = _rst_to_md(changelog[start_idx : end_idx + start_idx]) sys.stdout.write("\n".join(md_lines)) sys.stdout.write("\n") def _format_section(section: str) -> str: emoji = { "Added": "rocket", "Changed": "wrench", "Deprecated": "wastebasket", "Fixed": "bug", "Performance": "racing_car", "Removed": "fire", }.get(section, "wrench") return f"\n### :{emoji}: {section}\n" # Matches strings that look like "`#123`_" GITHUB_LINK_RE = re.compile(r"`#([0-9]+)`_") def clean_line(text: str) -> str: return GITHUB_LINK_RE.sub(lambda m: m.group().strip("`_"), text).replace("``", "`") def _rst_to_md(lines: List[str]) -> Generator[str, None, None]: for line in lines: line = line.strip() if line.startswith("**"): section = line.strip("*") yield _format_section(section) elif line: yield clean_line(line) def build_parser() -> argparse.ArgumentParser: argument_parser = argparse.ArgumentParser(description="Manage Schemathesis changelog.") subparsers = argument_parser.add_subparsers(title="subcommands", dest="subcommand") # `bump` subcommand bump_parser = subparsers.add_parser("bump", help="Bump the version of the changelog") bump_parser.add_argument("new_version", type=str, help="The new version number to bump to") # `md` subcommand md_parser = subparsers.add_parser("md", help="Transform the changelog for a specific version into markdown style") md_parser.add_argument("version", type=str, help="The version to transform into markdown") return argument_parser if __name__ == "__main__": parser = build_parser() args = parser.parse_args() if args.subcommand == "bump": bump(args.new_version) elif args.subcommand == "md": to_markdown(args.version) else: parser.error("Missing subcommand")
/schemathesis-3.19.2.tar.gz/schemathesis-3.19.2/changelog.py
0.599954
0.165425
changelog.py
pypi
<p align="center"> <em>Discover API-breaking payloads, keep API documentation up-to-date, and increase confidence in your API</em> </p> <p align="center"> <a href="https://github.com/schemathesis/schemathesis/actions" target="_blank"> <img src="https://github.com/schemathesis/schemathesis/actions/workflows/build.yml/badge.svg" alt="Build"> </a> <a href="https://codecov.io/gh/schemathesis/schemathesis/branch/master" target="_blank"> <img src="https://codecov.io/gh/schemathesis/schemathesis/branch/master/graph/badge.svg" alt="Coverage"> </a> <a href="https://pypi.org/project/schemathesis/" target="_blank"> <img src="https://img.shields.io/pypi/v/schemathesis.svg" alt="Version"> </a> <a href="https://pypi.org/project/schemathesis/" target="_blank"> <img src="https://img.shields.io/pypi/pyversions/schemathesis.svg" alt="Python versions"> </a> <a href="https://discord.gg/R9ASRAmHnA" target="_blank"> <img src="https://img.shields.io/discord/938139740912369755" alt="Discord"> </a> <a href="https://opensource.org/licenses/MIT" target="_blank"> <img src="https://img.shields.io/pypi/l/schemathesis.svg" alt="License"> </a> </p> --- **Documentation**: <a href="https://schemathesis.readthedocs.io/en/stable/" target="_blank">https://schemathesis.readthedocs.io/en/stable/ </a> **Chat**: <a href="https://discord.gg/R9ASRAmHnA" target="_blank">https://discord.gg/R9ASRAmHnA </a> --- Schemathesis is a specification-based testing tool for OpenAPI and GraphQL apps based on the powerful <a href="https://hypothesis.works/" target="_blank">Hypothesis</a> framework. Here are the key features: - **OpenAPI & GraphQL**: Test a wide range of APIs with ease, regardless of the specification used. - **Positive & Negative Tests**: Ensure your API handles valid and invalid inputs, incl. unexpected ones. - **Stateful Testing**: Automatically generate sequences of API requests where subsequent requests build on previous ones for testing complex and interdependent scenarios. 
- **Session Replay**: Quickly store and replay test sessions to easily investigate and resolve issues. - **Targeted Testing**: Guide data generation towards specific metrics like response time or size. Uncover performance or resource usage issues and optimize API behavior under different conditions. - **Python Integration**: Utilize native ASGI/WSGI support for faster testing your Python applications. - **Customization**: Tune data generation, API response verification, and testing process to fit your needs. - **CI Integration**: Run tests on every code change with Docker image and [GitHub Action](https://github.com/schemathesis/action). ## Testimonials "_The world needs modern, spec-based API tests, so we can deliver APIs as-designed. Schemathesis is the right tool for that job._" <div>Emmanuel Paraskakis - <strong>Level 250</strong></div> --- "_Schemathesis is the only sane way to thoroughly test an API._" <div>Zdenek Nemec - <strong>superface.ai</strong></div> --- "_The tool is absolutely amazing as it can do the negative scenario testing instead of me and much faster! Before I was doing the same tests in Postman client. But it's much slower and brings maintenance burden._" <div>Luděk Nový - <strong>JetBrains</strong></div> --- "_Schemathesis is the best tool for fuzz testing of REST API on the market. We are at Red Hat use it for examining our applications in functional and integrations testing levels._" <div>Dmitry Misharov - <strong>RedHat</strong></div> --- ## How does it work? Schemathesis uses your API's schema to generate both valid and invalid test scenarios, helping you verify API compliance and catch potential issues. It also verifies examples from the schema itself. 
Schemathesis generates high quality, diverse test data based on novel techniques like [Swarm testing](https://dl.acm.org/doi/10.1145/2338965.2336763) or [Schema fuzzing](https://patricegodefroid.github.io/public_psfiles/fse2020.pdf), ensuring that your API is thoroughly tested and even the most elusive bugs are uncovered. It's a versatile tool that works with any language, as long as you have an API schema in a supported format. Learn more about how it works in our [research paper](https://arxiv.org/abs/2112.10328). ## Why use Schemathesis? 1. **Avoid Crashes**: Discover API-breaking payloads and avoid crashes, database corruption, and hangs. 2. **Keep API Documentation Up-to-Date**: With Schemathesis, you never have to worry about API consumers using outdated specifications or incorrect payload examples. 3. **Easy Debugging**: Schemathesis provides you with a detailed failure report, along with a single cURL command to help you reproduce the problem instantly. 4. **Increased Confidence in API Stability**: By thoroughly testing your API with Schemathesis, you can have peace of mind knowing that your API is functioning as intended. 5. **Thorough Testing Coverage**: Schemathesis generates a large number of scenarios to test your API against, giving you a comprehensive view of its behavior and potential issues. 6. **Time-Saving**: Schemathesis streamlines API testing, saving your time for other tasks. ## Getting started Schemathesis can be used as a CLI, a Python library, or as a [SaaS](https://schemathesis.io/?utm_source=github). - **CLI**: Quick and easy way to get started, for those who prefer the command line. - **Python Library**: More control and customization, for developers integrating with their codebase. - **SaaS**: No setup or installation, if you prefer an all-in-one solution with great visuals. Free tier included. ## Installation ```bash python -m pip install schemathesis ``` This command installs the `st` entrypoint. 
You can also use our Docker image without installing Schemathesis as a Python package: ```bash docker pull schemathesis/schemathesis:stable ``` ## Example ### Command line ```bash st run --checks all https://example.schemathesis.io/openapi.json # Or docker run schemathesis/schemathesis:stable \ run --checks all https://example.schemathesis.io/openapi.json ``` ![image](https://raw.githubusercontent.com/schemathesis/schemathesis/master/img/demo.gif) ### Python tests ```python import schemathesis schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") @schema.parametrize() def test_api(case): case.call_and_validate() ``` Choose CLI for simplicity or Python package for greater flexibility. Both options run extensive tests and report failures with reproduction instructions. 💡 See a complete working example project in the [/example](https://github.com/schemathesis/schemathesis/tree/master/example) directory.💡 ## GitHub Actions If you use GitHub Actions, there is a native [GitHub app](https://github.com/apps/schemathesis) that reports test results directly to your pull requests. ```yaml api-tests: runs-on: ubuntu-20.04 steps: # Runs Schemathesis tests with all checks enabled - uses: schemathesis/action@v1 with: # Your API schema location schema: "http://localhost:5000/api/openapi.json" # OPTIONAL. Your Schemathesis.io token token: ${{ secrets.SCHEMATHESIS_TOKEN }} ``` Check our [GitHub Action](https://github.com/schemathesis/action) for more details. ## Let's make it better together 🤝 We're always looking to make Schemathesis better, and your feedback is a crucial part of that journey! If you've got a few minutes, we'd love to hear your thoughts on your experience using Schemathesis. Just follow [this link](https://forms.gle/kJ4hSxc1Yp6Ga96t5) to let us know what you think 💬 Thanks for helping us make Schemathesis even better! 
👍 ## Commercial support For assistance with integrating Schemathesis into your company workflows or improving its effectiveness, reach out to our support team at <a href="mailto:[email protected]">[email protected]</a>. Additionally, we offer commercial support for those looking for extra assurance and priority assistance. ## Contributing Any contribution to development, testing, or any other area is highly appreciated and useful to the project. For guidance on how to contribute to Schemathesis, see the [contributing guidelines](https://github.com/schemathesis/schemathesis/blob/master/CONTRIBUTING.rst). ## Additional content - [Deriving Semantics-Aware Fuzzers from Web API Schemas](https://arxiv.org/abs/2112.10328) by **@Zac-HD** and **@Stranger6667** - [An article](https://dygalo.dev/blog/schemathesis-property-based-testing-for-api-schemas/) about Schemathesis by **@Stranger6667** - [Effective API schemas testing](https://youtu.be/VVLZ25JgjD4) from DevConf.cz by **@Stranger6667** - [How to use Schemathesis to test Flask API in GitHub Actions](https://notes.lina-is-here.com/2022/08/04/schemathesis-docker-compose.html) by **@lina-is-here** - [Testing APIFlask with schemathesis](http://blog.pamelafox.org/2023/02/testing-apiflask-with-schemathesis.html) by **@pamelafox** - [A video](https://www.youtube.com/watch?v=9FHRwrv-xuQ) from EuroPython 2020 by **@hultner** - [Schemathesis tutorial](https://appdev.consulting.redhat.com/tracks/contract-first/automated-testing-with-schemathesis.html) with an accompanying [video](https://www.youtube.com/watch?v=4r7OC-lBKMg) by Red Hat - [Using Hypothesis and Schemathesis to Test FastAPI](https://testdriven.io/blog/fastapi-hypothesis/) by **@amalshaji** - [A tutorial](https://habr.com/ru/company/oleg-bunin/blog/576496/) (RUS) about Schemathesis by **@Stranger6667** ## License This project is licensed under the terms of the [MIT license](https://opensource.org/licenses/MIT).
/schemathesis-3.19.2.tar.gz/schemathesis-3.19.2/README.md
0.60871
0.938688
README.md
pypi
Authentication ============== In this section, we'll cover how to use Schemathesis to test APIs that require authentication. We'll start with the basics of setting authentication credentials manually using headers, cookies, and query strings. Then, we'll move on to more advanced topics, including HTTP Basic, Digest Authentication, custom authentication mechanisms, and reusing sessions in Python tests. Setting credentials ------------------- To set authentication credentials manually, you can pass a key-value pairs to Schemathesis when running tests. Here's an example command for setting a custom header or cookie using the CLI: .. code:: text st run -H "Authorization: Bearer TOKEN" ... st run -H "Cookie: session=SECRET" ... You can also provide multiple headers by using the ``-H`` option multiple times: .. code:: text st run -H "Authorization: Bearer TOKEN" -H "X-Session-Id: SECRET" ... .. note:: Query string authentication is not yet supported in the Schemathesis CLI, however, you can use custom authentication mechanisms to set authentication in a query string parameter. Details on how to do this are described in the :ref:`Custom Authentication <custom-auth>` section below. For Python tests you can set a header, cookie or a query parameter inside your test function: .. code-block:: python import schemathesis schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") @schema.parametrize() def test_api(case): # Header case.call_and_validate(headers={"Authorization": "Bearer TOKEN"}) # Cookie case.call_and_validate(cookies={"session": "SECRET"}) # Query parameter case.call_and_validate(params={"Api-Key": "KEY"}) Built-In Authentication mechanisms ---------------------------------- `HTTP Basic <https://datatracker.ietf.org/doc/html/rfc7617>`_ and `HTTP Digest <https://datatracker.ietf.org/doc/html/rfc7616>`_ are two common authentication schemes supported by Schemathesis out of the box. .. code:: text st run --auth user:pass --auth-type=basic ... 
st run --auth user:pass --auth-type=digest ... In Python tests, you can use the `requests <https://github.com/psf/requests>`_ library to send requests with HTTP Basic or HTTP Digest authentication. You can pass the authentication credentials using the ``auth`` arguments of the ``call`` or ``call_and_validate`` methods: .. code-block:: python import schemathesis from requests.auth import HTTPDigestAuth schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") @schema.parametrize() def test_api(case): # HTTP Basic case.call_and_validate(auth=("user", "password")) # HTTP Digest case.call_and_validate(auth=HTTPDigestAuth("user", "password")) .. _custom-auth: Custom Authentication --------------------- In addition to the built-in authentication options, Schemathesis also allows you to implement your own custom authentication mechanisms in Python. It can be useful if you are working with an API that uses a custom authentication method. This section will explain how to define custom authentication mechanisms and use them in CLI and Python tests. Implementation ~~~~~~~~~~~~~~ To implement a custom authentication mechanism, you need to create a Python class with two methods and plug it into Schemathesis. The two methods your class should contain are: - ``get``: This method should get the authentication data and return it. - ``set``: This method should modify the generated test sample so that it contains the authentication data. Here's an example of a simple custom authentication class. However, please note that this code alone will not work without the necessary registration steps, which will be described later in this section. .. code:: python import requests # This is a real endpoint, try it out! 
TOKEN_ENDPOINT = "https://example.schemathesis.io/api/token/" USERNAME = "demo" PASSWORD = "test" class MyAuth: def get(self, context): response = requests.post( TOKEN_ENDPOINT, json={"username": USERNAME, "password": PASSWORD}, ) data = response.json() return data["access_token"] def set(self, case, data, context): case.headers = case.headers or {} case.headers["Authorization"] = f"Bearer {data}" The ``get`` method sends a request to a token endpoint and returns the access token retrieved from the JSON response. The ``set`` method modifies the generated ``Case`` instance so that it contains the authentication data, adding an ``Authorization`` header with the retrieved token. The ``context`` argument contains a few attributes useful for the authentication process: - ``context.operation``. API operation that is currently being tested - ``context.app``. A Python application if the WSGI / ASGI integration is used Using in CLI ~~~~~~~~~~~~ To use your custom authentication mechanism in the Schemathesis CLI, you need to register it globally. Here's an example of how to do that: .. code:: python import schemathesis @schemathesis.auth() class MyAuth: # Here goes your implementation ... Put the code above to the ``hooks.py`` file and extend your command via the ``SCHEMATHESIS_HOOKS`` environment variable: .. code:: bash $ SCHEMATHESIS_HOOKS=hooks $ st run ... .. note:: You can take a look at how to extend CLI :ref:`here <extend-cli>` Using in Python tests ~~~~~~~~~~~~~~~~~~~~~ To use your custom authentication mechanism in Python tests, you also need to register it. The registration process is similar to the global registration for CLI, but instead, you can register your auth implementation at the schema or test level. The following example shows how to use auth only tests generated via the ``schema`` instance: .. 
code:: python import schemathesis schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") @schema.auth() class MyAuth: # Here goes your implementation ... And this one shows auth applied only to the ``test_api`` function: .. code:: python import schemathesis schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") class MyAuth: # Here goes your implementation ... @schema.auth(MyAuth) @schema.parametrize() def test_api(case): ... Conditional Authentication ~~~~~~~~~~~~~~~~~~~~~~~~~~ Schemathesis offers a way to apply authentication to only a specific set of API operations during testing. This is helpful when you need to test different authentication types for different API operations or when the API has a combination of authenticated and unauthenticated endpoints. Multiple filters can be combined and applied to include or exclude API operations based on exact values, regular expressions, or custom functions. Here is how you can apply auth to all API operations with the ``/users/`` path, but exclude the ``POST`` method. .. code:: python import schemathesis @schemathesis.auth().apply_to(path="/users/").skip_for(method="POST") class MyAuth: # Here goes your implementation ... schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") @schema.auth(MyAuth).apply_to(path="/users/").skip_for(method="POST") @schema.parametrize() def test_api(case): ... .. note:: This decorator syntax is supported only on Python 3.9+. For older Python versions you need to bind separate variables for each term. 
Basic rules: - ``apply_to`` applies authentication to all API operations that match the filter term - ``skip_for`` skips authentication for all API operations that match the filter term - All conditions within a filter term are combined with the ``AND`` logic - Each ``apply_to`` and ``skip_for`` term is combined with the ``OR`` logic - Both ``apply_to`` and ``skip_for`` use the same set of conditions as arguments Conditions: - ``path``: the path of the API operation without its ``basePath``. - ``method``: the upper-cased HTTP method of the API operation - ``name``: the name of the API operation, such as ``GET /users/`` or ``Query.getUsers`` - Each condition can take either a single string or a list of options as input - You can also use a regular expression to match the conditions by adding ``_regex`` to the end of the condition and passing a string or a compiled regex. Here are some examples for ``path``, other conditions works the same: .. code:: python import re import schemathesis schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") # Only `/users/` @schema.auth().apply_to(path="/users/") # Only `/users/` and `/orders/` @schema.auth().apply_to(path=["/users/", "/orders/"]) # Only paths starting with `/u` @schema.auth().apply_to(path_regex="^/u") # Only paths starting with `/u` case insensitive @schema.auth().apply_to(path_regex=re.compile("^/u", re.IGNORECASE)) # Only `GET /users/` or `POST /orders/` @schema.auth().apply_to( method="GET", path="/users/", ).apply_to( method="POST", path="/orders/", ) class MyAuth: # Here goes your implementation ... You can also use a custom function to determine whether to apply or skip authentication for a given operation. The function should take an ``AuthContext`` instance and return a boolean value. To use a custom function with ``apply_to`` or ``skip_for``, simply pass it as the first argument. For example: .. 
code:: python import schemathesis schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") def is_deprecated(ctx): return ctx.operation.definition.get("deprecated") is True # Skip auth for all deprecated API operations @schema.auth().skip_for(is_deprecated) class MyAuth: # Here goes your implementation ... Refreshing credentials ~~~~~~~~~~~~~~~~~~~~~~ By default, the authentication data from the ``get`` method is cached for a while (300 seconds by default). To customize the caching behavior, pass the ``refresh_interval`` argument to the ``auth`` / ``register`` / ``apply`` functions. This parameter specifies the number of seconds for which the authentication data will be cached after a non-cached ``get`` call. To disable caching completely, set ``refresh_interval`` to None. For example, the following code sets the caching time to 600 seconds: .. code:: python import schemathesis @schemathesis.auth(refresh_interval=600) class MyAuth: # Here goes your implementation ... WSGI / ASGI support ~~~~~~~~~~~~~~~~~~~ If you are testing a Python app, you might want to use the WSGI / ASGI integrations and get authentication data from your application instance directly. It could be done by using the ``context`` to get the application instance: **FastAPI**: .. code:: python from myapp import app from starlette_testclient import TestClient schema = schemathesis.from_asgi("/openapi.json", app=app) TOKEN_ENDPOINT = "/auth/token/" USERNAME = "demo" PASSWORD = "test" @schema.auth() class MyAuth: def get(self, context): client = TestClient(context.app) response = client.post( TOKEN_ENDPOINT, json={"username": USERNAME, "password": PASSWORD} ) return response.json()["access_token"] def set(self, case, data, context): case.headers = case.headers or {} case.headers["Authorization"] = f"Bearer {data}" **Flask**: .. 
code:: python from myapp import app import werkzeug schema = schemathesis.from_wsgi("/openapi.json", app=app) TOKEN_ENDPOINT = "/auth/token/" USERNAME = "demo" PASSWORD = "test" @schema.auth() class MyAuth: def get(self, context): client = werkzeug.Client(context.app) response = client.post( TOKEN_ENDPOINT, json={"username": USERNAME, "password": PASSWORD} ) return response.json["access_token"] def set(self, case, data, context): case.headers = case.headers or {} case.headers["Authorization"] = f"Bearer {data}" Refresh tokens ~~~~~~~~~~~~~~ As the auth provider class can hold additional state, you can use it to implement more complex authentication flows. For example, you can use refresh tokens for authentication. .. code:: python import requests import schemathesis TOKEN_ENDPOINT = "https://auth.myapp.com/api/token/" REFRESH_ENDPOINT = "https://auth.myapp.com/api/refresh/" USERNAME = "demo" PASSWORD = "test" @schemathesis.auth() class MyAuth: def __init__(self): self.refresh_token = None def get(self, context): if self.refresh_token is not None: return self.refresh(context) return self.login(context) def login(self, context): response = requests.post( TOKEN_ENDPOINT, json={"username": USERNAME, "password": PASSWORD}, ) data = response.json() self.refresh_token = data["refresh_token"] return data["access_token"] def refresh(self, context): response = requests.post( REFRESH_ENDPOINT, headers={"Authorization": f"Bearer {self.refresh_token}"}, ) data = response.json() self.refresh_token = data["refresh_token"] return data["access_token"] def set(self, case, data, context): case.headers = case.headers or {} case.headers["Authorization"] = f"Bearer {data}" Third-party implementation -------------------------- If you'd like to use an authentication mechanism that is not natively supported by Schemathesis, you can use third-party extensions to the ``requests`` library inside Schemathesis tests. 
You can pass a ``requests.auth.AuthBase`` subclass instance to ``auth.set_from_requests`` and Schemathesis will use it automatically for every request it makes during testing. .. important:: Note, that this feature works only over HTTP and Python's WSGI transport is not supported. Here is an example that uses the `requests-ntlm <https://github.com/requests/requests-ntlm>`_ library that supports the `NTLM HTTP Authentication <https://datatracker.ietf.org/doc/html/rfc4559>`_ protocol. .. code:: python import schemathesis from requests_ntlm import HttpNtlmAuth schemathesis.auth.set_from_requests(HttpNtlmAuth("domain\\username", "password")) .. note:: You'll need to load this code as any other hook for CLI. For Python tests it works similarly: .. code-block:: python import schemathesis from requests_ntlm import HttpNtlmAuth schema = schemathesis.from_uri("https://example.schemathesis.io/openapi.json") schema.auth.set_from_requests(HttpNtlmAuth("domain\\username", "password")) @schema.parametrize() def test_api(case): ... Custom test client in Python tests ---------------------------------- Sometimes you need to reuse the same test client across multiple tests to share authentication data or execute custom events during session startup or shutdown (such as establishing a database connection): .. code-block:: python from myapp import app from starlette_testclient import TestClient schema = schemathesis.from_asgi("/openapi.json", app=app) @schema.parametrize() def test_api(case): with TestClient(app) as session: case.call_and_validate(session=session)
/schemathesis-3.19.2.tar.gz/schemathesis-3.19.2/docs/auth.rst
0.90851
0.69766
auth.rst
pypi
Data generation =============== This section describes how Schemathesis generates test examples and their serialization process. Schemathesis converts Open API schemas to compatible JSON Schemas and passes them to ``hypothesis-jsonschema``, which generates data for those schemas. .. important:: If the API schema is complex or deeply nested, data generation may be slow or produce data without much variance. It is a known behavior and caused by the way Hypothesis works internally. There are many tradeoffs in this process, and Hypothesis tries to give reasonable defaults for a typical case and not be too slow for pathological cases. Negative testing ---------------- By default, Schemathesis generates data that matches the input schema. Alternatively it can generate the contrary - examples that do not match the input schema. CLI: .. code:: text $ st run -D negative https://example.schemathesis.io/openapi.json Python: .. code:: python import schemathesis from schemathesis import DataGenerationMethod schema = schemathesis.from_uri( "https://example.schemathesis.io/openapi.json", data_generation_methods=[DataGenerationMethod.negative], ) @schema.parametrize() def test_api(case): case.call_and_validate() .. note:: At this moment, negative testing is significantly slower than positive testing. Payload serialization --------------------- When your API accepts a payload, requests should have a media type located in their ``Content-Type`` header. In Open API 3.0, you may write something like this: .. code-block:: :emphasize-lines: 7 openapi: 3.0.0 paths: /pet: post: requestBody: content: application/json: schema: type: object required: true In this example, operation ``POST /pet`` expects ``application/json`` payload. For each defined media type Schemathesis generates data according to the relevant schema (``{"type": "object"}`` in the example). .. note:: This data is stored in the ``case`` fixture you use in tests when you use our ``pytest`` integration. 
Before sending, this data should be serialized to the format expected by the tested operation. Schemathesis supports most common media types like ``application/json`` and ``text/plain`` out of the box and allows you to add support for other media types via the ``serializers`` mechanism. Schemathesis uses ``requests`` to send API requests over the network and ``werkzeug.Client`` for direct WSGI integration. Serializers define the process of transforming generated Python objects into structures that can be sent by these tools. If Schemathesis is unable to serialize data for a media type, the generated samples will be rejected. If an API operation does not define media types that Schemathesis can serialize, you will see an ``Unsatisfiable`` error. If the operation under test considers payload to be optional, these cases are still generated by Schemathesis, but not passed to serializers. CSV data example ~~~~~~~~~~~~~~~~ In this example, we will define an operation that expects CSV data and set up a serializer for it. Even though Open API does not define a standard way to describe the structure of CSV payload, we can use the ``array`` type to describe it: .. code-block:: :emphasize-lines: 8-21 paths: /csv: post: requestBody: content: text/csv: schema: items: additionalProperties: false properties: first_name: pattern: \A[A-Za-z]*\Z type: string last_name: pattern: \A[A-Za-z]*\Z type: string required: - first_name - last_name type: object type: array required: true responses: '200': description: OK This schema describes a CSV structure with two string fields - ``first_name`` and ``last_name``. Schemathesis will generate lists of Python dictionaries that can be serialized by ``csv.DictWriter``. You are free to write a schema of any complexity, but be aware that Schemathesis may generate uncommon data that your serializer will need to handle. In this example we restrict string characters only to ASCII letters to avoid handling Unicode symbols for simplicity. 
First, let's define a function that will transform lists of dictionaries to CSV strings: .. code-block:: python import csv from io import StringIO def to_csv(data): if not data: # Empty CSV file return "" output = StringIO() # Assume all items have the same fields field_names = sorted(data[0].keys()) writer = csv.DictWriter(output, field_names) writer.writeheader() writer.writerows(data) return output.getvalue() .. note:: You can take a look at the official `csv module documentation <https://docs.python.org/3/library/csv.html>`_ for more examples of CSV serialization. Second, register a serializer class via the ``schemathesis.serializer`` decorator: .. code-block:: python :emphasize-lines: 4 import schemathesis @schemathesis.serializer("text/csv") class CSVSerializer: ... This decorator requires the name of the media type you need to handle and optionally accepts additional media types via its ``aliases`` keyword argument. Third, the serializer should have two methods - ``as_requests`` and ``as_werkzeug``. .. code-block:: python ... class CSVSerializer: def as_requests(self, context, value): if isinstance(value, bytes): return {"data": value} return {"data": to_csv(value)} def as_werkzeug(self, context, value): if isinstance(value, bytes): return {"data": value} return {"data": to_csv(value)} They should return dictionaries of keyword arguments that will be passed to ``requests.request`` and ``werkzeug.Client.open``, respectively. With the CSV example, we create payload with the ``to_csv`` function defined earlier and return it as ``data``, which is valid for both cases. Note that both methods explicitly handle binary data - for non-binary media types, it may happen if the API schema contains examples via the ``externalValue`` keyword. In these cases, the loaded example is passed directly as binary data. Additionally, you have ``context`` where you can access the current test case via ``context.case``. .. 
important:: Please, note that ``value`` will match your schema in positive testing scenarios, and it is your responsibility to handle errors during data serialization.
/schemathesis-3.19.2.tar.gz/schemathesis-3.19.2/docs/how.rst
0.90341
0.753399
how.rst
pypi
from typing import Any
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
import synapseclient  # type: ignore
import pandas  # type: ignore


class SynapseTableNameError(Exception):
    """Raised when a table name matches zero or multiple tables in the project."""

    def __init__(self, message: str, table_name: str) -> None:
        """
        Args:
            message (str): A message describing the error
            table_name (str): The name of the table
        """
        self.message = message
        self.table_name = table_name
        super().__init__(self.message)

    def __str__(self) -> str:
        return f"{self.message}:{self.table_name}"


class SynapseDeleteRowsError(Exception):
    """Raised when row-deletion input data is missing required columns."""

    def __init__(self, message: str, table_id: str, columns: list[str]) -> None:
        """
        Args:
            message (str): A message describing the error
            table_id (str): The synapse id of the table
            columns (list[str]): A list of columns in the synapse table
        """
        self.message = message
        self.table_id = table_id
        self.columns = columns
        super().__init__(self.message)

    def __str__(self) -> str:
        return f"{self.message}; table_id:{self.table_id}; columns: {', '.join(self.columns)}"


class Synapse:  # pylint: disable=too-many-public-methods
    """
    The Synapse class handles interactions with a project in Synapse.
    """

    def __init__(self, auth_token: str, project_id: str) -> None:
        """Init

        Args:
            auth_token (str): A Synapse auth_token
            project_id (str): A Synapse id for a project
        """
        # Log in once; the authenticated client is reused by every method.
        # (The original assigned self.project_id twice; once is enough.)
        syn = synapseclient.Synapse()
        syn.login(authToken=auth_token)
        self.syn = syn
        self.project_id = project_id

    def download_csv_as_dataframe(self, synapse_id: str) -> pandas.DataFrame:
        """Downloads a csv file from Synapse and reads it

        Args:
            synapse_id (str): The Synapse id of the file

        Returns:
            pandas.DataFrame: The file in dataframe form
        """
        entity = self.syn.get(synapse_id)
        return pandas.read_csv(entity.path)

    def get_table_names(self) -> list[str]:
        """Gets the names of the tables in the schema

        Returns:
            list[str]: A list of table names
        """
        return [table["name"] for table in self._get_tables()]

    def _get_tables(self) -> list[synapseclient.Table]:
        """Gets the list of Synapse table entities for the project

        Returns:
            list[synapseclient.Table]: A list of all Synapse table entities
        """
        project = self.syn.get(self.project_id)
        return list(self.syn.getChildren(project, includeTypes=["table"]))

    def get_table_column_names(self, table_name: str) -> list[str]:
        """Gets the column names from a synapse table

        Args:
            table_name (str): The name of the table

        Returns:
            list[str]: A list of column names
        """
        synapse_id = self.get_synapse_id_from_table_name(table_name)
        table = self.syn.get(synapse_id)
        columns = list(self.syn.getTableColumns(table))
        return [column.name for column in columns]

    def get_synapse_id_from_table_name(self, table_name: str) -> str:
        """Gets the synapse id from the table name

        Args:
            table_name (str): The name of the table

        Raises:
            SynapseTableNameError: When no tables match the name
            SynapseTableNameError: When multiple tables match the name

        Returns:
            str: A synapse id
        """
        tables = self._get_tables()
        matching_tables = [table for table in tables if table["name"] == table_name]
        if len(matching_tables) == 0:
            raise SynapseTableNameError("No matching tables with name:", table_name)
        if len(matching_tables) > 1:
            raise SynapseTableNameError(
                "Multiple matching tables with name:", table_name
            )
        return matching_tables[0]["id"]

    def get_table_name_from_synapse_id(self, synapse_id: str) -> str:
        """Gets the table name from the synapse id

        Args:
            synapse_id (str): A synapse id

        Raises:
            SynapseTableNameError: When no table in the project has the id

        Returns:
            str: The name of the table with the synapse id
        """
        tables = self._get_tables()
        matching_names = [
            table["name"] for table in tables if table["id"] == synapse_id
        ]
        # Raise a descriptive error instead of the bare IndexError the
        # previous ``[0]`` lookup produced for an unknown id; this mirrors
        # get_synapse_id_from_table_name.
        if not matching_names:
            raise SynapseTableNameError("No table with synapse id:", synapse_id)
        return matching_names[0]

    def query_table(
        self, synapse_id: str, include_row_data: bool = False
    ) -> pandas.DataFrame:
        """Queries a whole table

        Args:
            synapse_id (str): The Synapse id of the table to query
            include_row_data (bool): Include row_id and row_etag. Defaults to False.

        Returns:
            pandas.DataFrame: The queried table
        """
        query = f"SELECT * FROM {synapse_id}"
        return self.execute_sql_query(query, include_row_data)

    def execute_sql_query(
        self, query: str, include_row_data: bool = False
    ) -> pandas.DataFrame:
        """Execute a Sql query

        Args:
            query (str): A SQL statement that can be run by Synapse
            include_row_data (bool): Include row_id and row_etag. Defaults to False.

        Returns:
            pandas.DataFrame: The queried table
        """
        result = self.execute_sql_statement(query, include_row_data)
        return pandas.read_csv(result.filepath)

    def execute_sql_statement(
        self, statement: str, include_row_data: bool = False
    ) -> Any:
        """Execute a SQL statement

        Args:
            statement (str): A SQL statement that can be run by Synapse
            include_row_data (bool): Include row_id and row_etag. Defaults to False.

        Returns:
            Any: The result object returned by ``synapseclient.Synapse.tableQuery``
        """
        return self.syn.tableQuery(
            statement, includeRowIdAndRowVersion=include_row_data
        )

    def build_table(self, table_name: str, table: pandas.DataFrame) -> None:
        """Adds a table to the project based on the input table

        Args:
            table_name (str): The name of the table
            table (pandas.DataFrame): A dataframe of the table
        """
        # Shallow copy so the caller's dataframe reference is not rebound.
        table_copy = table.copy(deep=False)
        project = self.syn.get(self.project_id)
        table_copy = synapseclient.table.build_table(table_name, project, table_copy)
        self.syn.store(table_copy)

    def add_table(self, table_name: str, columns: list[synapseclient.Column]) -> None:
        """Adds a synapse table

        Args:
            table_name (str): The name of the table to be added
            columns (list[synapseclient.Column]): The columns to be added
        """
        # create a dictionary with a key for every column, and value of an empty list
        values: dict[str, list] = {column.name: [] for column in columns}
        schema = synapseclient.Schema(
            name=table_name, columns=columns, parent=self.project_id
        )
        table = synapseclient.Table(schema, values)
        self.syn.store(table)

    def delete_table(self, synapse_id: str) -> None:
        """Deletes a Synapse table

        Args:
            synapse_id (str): The Synapse id of the table to delete
        """
        self.syn.delete(synapse_id)

    def replace_table(self, table_name: str, table: pandas.DataFrame) -> None:
        """
        Replaces synapse table with table made in table.
        The synapse id is preserved.

        Args:
            table_name (str): The name of the table to be replaced
            table (pandas.DataFrame): A dataframe of the table to replace the old table with
        """
        if table_name not in self.get_table_names():
            self.build_table(table_name, table)
        else:
            # Keep the entity (and therefore its synapse id); only the
            # rows and columns are swapped out.
            synapse_id = self.get_synapse_id_from_table_name(table_name)
            self.delete_all_table_rows(synapse_id)
            self.delete_all_table_columns(synapse_id)
            self.add_table_columns(synapse_id, synapseclient.as_table_columns(table))
            self.insert_table_rows(synapse_id, table)

    def insert_table_rows(self, synapse_id: str, data: pandas.DataFrame) -> None:
        """Insert table rows into Synapse table

        Args:
            synapse_id (str): The Synapse id of the table to add rows into
            data (pandas.DataFrame): The rows to be added.
        """
        table = self.syn.get(synapse_id)
        self.syn.store(synapseclient.Table(table, data))

    def upsert_table_rows(self, synapse_id: str, data: pandas.DataFrame) -> None:
        """Upserts rows from the given table

        Args:
            synapse_id (str): The Synapse ID of the table to be upserted into
            data (pandas.DataFrame): The table the rows will come from
        """
        self.syn.store(synapseclient.Table(synapse_id, data))

    def delete_table_rows(self, synapse_id: str, data: pandas.DataFrame) -> None:
        """Deletes rows from the given table

        Args:
            synapse_id (str): The Synapse id of the table the rows will be deleted from
            data (pandas.DataFrame): A pandas.DataFrame. Columns must include
                "ROW_ID", and "ROW_VERSION"

        Raises:
            SynapseDeleteRowsError: If "ROW_ID" not in the columns of the data
            SynapseDeleteRowsError: If "ROW_VERSION" not in the columns of the data
        """
        columns = list(data.columns)
        if "ROW_ID" not in columns:
            raise SynapseDeleteRowsError(
                "ROW_ID missing from input data", synapse_id, columns
            )
        if "ROW_VERSION" not in columns:
            raise SynapseDeleteRowsError(
                "ROW_VERSION missing from input data", synapse_id, columns
            )
        self.syn.delete(synapseclient.Table(synapse_id, data))

    @retry(
        stop=stop_after_attempt(5),
        wait=wait_fixed(1),
        retry=retry_if_exception_type(synapseclient.core.exceptions.SynapseHTTPError),
    )
    def delete_all_table_rows(self, synapse_id: str) -> None:
        """Deletes all rows in the Synapse table

        Args:
            synapse_id (str): The Synapse id of the table
        """
        table = self.syn.get(synapse_id)
        columns = self.syn.getTableColumns(table)
        # A table with no columns has no rows to delete.
        if len(list(columns)) > 0:
            results = self.syn.tableQuery(f"select * from {synapse_id}")
            self.syn.delete(results)

    @retry(
        stop=stop_after_attempt(5),
        wait=wait_fixed(1),
        retry=retry_if_exception_type(synapseclient.core.exceptions.SynapseHTTPError),
    )
    def delete_all_table_columns(self, synapse_id: str) -> None:
        """Deletes all columns in the Synapse table

        Args:
            synapse_id (str): The Synapse id of the table
        """
        table = self.syn.get(synapse_id)
        columns = self.syn.getTableColumns(table)
        for col in columns:
            table.removeColumn(col)
        self.syn.store(table)

    @retry(
        stop=stop_after_attempt(5),
        wait=wait_fixed(1),
        retry=retry_if_exception_type(synapseclient.core.exceptions.SynapseHTTPError),
    )
    def add_table_columns(
        self, synapse_id: str, columns: list[synapseclient.Column]
    ) -> None:
        """Add columns to synapse table

        Args:
            synapse_id (str): The Synapse id of the table to add the columns to
            columns (list[synapseclient.Column]): The columns to be added
        """
        table = self.syn.get(synapse_id)
        for col in columns:
            table.addColumn(col)
        self.syn.store(table)

    def get_entity_annotations(self, synapse_id: str) -> synapseclient.Annotations:
        """Gets the annotations for the Synapse entity

        Args:
            synapse_id (str): The Synapse id of the entity

        Returns:
            synapseclient.Annotations: The annotations of the Synapse entity in dict form.
        """
        return self.syn.get_annotations(synapse_id)

    def set_entity_annotations(
        self, synapse_id: str, annotations: dict[str, Any]
    ) -> None:
        """Sets the entities annotations to the input annotations

        Args:
            synapse_id (str): The Synapse ID of the entity
            annotations (dict[str, Any]): A dictionary of annotations
        """
        # Fetch the existing Annotations object and replace its contents
        # wholesale — presumably so entity metadata (etag/id) carried by the
        # object is preserved for set_annotations; verify against the client docs.
        entity_annotations = self.syn.get_annotations(synapse_id)
        entity_annotations.clear()
        for key, value in annotations.items():
            entity_annotations[key] = value
        self.syn.set_annotations(entity_annotations)

    def clear_entity_annotations(self, synapse_id: str) -> None:
        """Removes all annotations from the entity

        Args:
            synapse_id (str): The Synapse ID of the entity
        """
        annotations = self.syn.get_annotations(synapse_id)
        annotations.clear()
        self.syn.set_annotations(annotations)
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/synapse/synapse.py
0.878686
0.188137
synapse.py
pypi
from enum import Enum
from typing import Any, Optional, TypeVar

from pydantic.dataclasses import dataclass
from pydantic import validator


class ColumnDatatype(Enum):
    """A generic datatype that should be supported by all database types."""

    TEXT = "text"
    DATE = "date"
    INT = "int"
    FLOAT = "float"
    BOOLEAN = "boolean"


# mypy types so that a class can refer to its own type
X = TypeVar("X", bound="ColumnSchema")
Y = TypeVar("Y", bound="TableSchema")
T = TypeVar("T", bound="DatabaseSchema")


@dataclass()
class ColumnSchema:
    """A schema for a table column (attribute)."""

    name: str
    datatype: ColumnDatatype
    required: bool = False
    index: bool = False

    @validator("name")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty (has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if len(value) == 0:
            raise ValueError(f"{value} is an empty string")
        return value


@dataclass()
class ForeignKeySchema:
    """A foreign key in a database schema."""

    name: str
    foreign_table_name: str
    foreign_column_name: str

    @validator("name", "foreign_table_name", "foreign_column_name")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty (has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if len(value) == 0:
            raise ValueError(f"{value} is an empty string")
        return value

    def get_column_dict(self) -> dict[str, str]:
        """Returns the foreign key in dict form

        Returns:
            dict[str, str]: A dictionary of the foreign key columns
        """
        return {
            "name": self.name,
            "foreign_table_name": self.foreign_table_name,
            "foreign_column_name": self.foreign_column_name,
        }


class TableColumnError(Exception):
    """A generic error involving table columns"""

    def __init__(self, message: str, table_name: str) -> None:
        """
        Args:
            message (str): A message describing the error
            table_name (str): The name of the table involved in the error
        """
        self.message = message
        self.table_name = table_name
        super().__init__(self.message)

    def __str__(self) -> str:
        """String representation"""
        return f"{self.message}: {self.table_name}"


class TableKeyError(Exception):
    """TableKeyError"""

    def __init__(
        self, message: str, table_name: str, key: Optional[str] = None
    ) -> None:
        """
        Args:
            message (str): A message describing the error
            table_name (str): The name of the table involved in the error
            key (Optional[str], optional): The name of the key involved in the error.
                Defaults to None.
        """
        self.message = message
        self.table_name = table_name
        self.key = key
        super().__init__(self.message)

    def __str__(self) -> str:
        """String representation"""
        return f"{self.message}: {self.table_name}; {self.key}"


@dataclass
class TableSchema:
    """A schema for a database table."""

    name: str
    columns: list[ColumnSchema]
    primary_key: str
    foreign_keys: list[ForeignKeySchema]

    @validator("name", "primary_key")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty (has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if len(value) == 0:
            raise ValueError(f"{value} is an empty string")
        return value

    def __post_init__(self) -> None:
        """Happens after initialization"""
        self.columns.sort(key=lambda x: x.name)
        self.foreign_keys.sort(key=lambda x: x.name)
        self._check_columns()
        self._check_primary_key()
        self._check_foreign_keys()

    def __eq__(self, other: Any) -> bool:
        """Overrides the default implementation

        NOTE: equality considers only the sorted column list — not the table
        name, primary key, or foreign keys — preserving the original behavior.

        Args:
            other (Any): The object to compare against

        Returns:
            bool: Whether both objects have identical sorted columns
        """
        # Fix: comparing against an unrelated type used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # its default comparison instead.
        if not isinstance(other, TableSchema):
            return NotImplemented
        return self.get_sorted_columns() == other.get_sorted_columns()

    def get_sorted_columns(self) -> list[ColumnSchema]:
        """Gets the tables columns sorted by name

        Returns:
            list[ColumnSchema]: Sorted list of columns
        """
        return sorted(self.columns, key=lambda x: x.name)

    def get_column_names(self) -> list[str]:
        """Returns a list of names of the columns

        Returns:
            list[str]: A list of names of the columns
        """
        return [column.name for column in self.columns]

    def get_foreign_key_dependencies(self) -> list[str]:
        """Returns a list of table names the current table depends on

        Returns:
            list[str]: A list of table names
        """
        return [key.foreign_table_name for key in self.foreign_keys]

    def get_foreign_key_names(self) -> list[str]:
        """Returns a list of names of the foreign keys

        Returns:
            list[str]: A list of names of the foreign keys
        """
        return [key.name for key in self.foreign_keys]

    def get_foreign_key_by_name(self, name: str) -> ForeignKeySchema:
        """Returns foreign key

        Args:
            name (str): name of the foreign key

        Returns:
            ForeignKeySchema: The foreign key asked for
        """
        return [key for key in self.foreign_keys if key.name == name][0]

    def get_column_by_name(self, name: str) -> ColumnSchema:
        """Returns the column

        Args:
            name (str): name of the column

        Returns:
            ColumnSchema: The ColumnSchema asked for
        """
        return [column for column in self.columns if column.name == name][0]

    def _check_columns(self) -> None:
        """Checks that there are columns and they don't match

        Raises:
            TableColumnError: Raised when there are no columns
            TableColumnError: Raised when columns match
        """
        if len(self.columns) == 0:
            raise TableColumnError("There are no columns", self.name)
        if len(self.get_column_names()) != len(set(self.get_column_names())):
            raise TableColumnError("There are duplicate columns", self.name)

    def _check_primary_key(self) -> None:
        """Checks the primary key is in the columns

        Raises:
            TableKeyError: Raised when the primary key is missing from the columns
        """
        if self.primary_key not in self.get_column_names():
            raise TableKeyError(
                "Primary key is missing from columns", self.name, self.primary_key
            )

    def _check_foreign_keys(self) -> None:
        """Checks each foreign key"""
        for key in self.foreign_keys:
            self._check_foreign_key(key)

    def _check_foreign_key(self, key: ForeignKeySchema) -> None:
        """Checks the foreign key exists in the columns and isn't referencing its own table

        Args:
            key (ForeignKeySchema): A schema for a foreign key

        Raises:
            TableKeyError: Raised when the foreign key is missing from the columns
            TableKeyError: Raised when the foreign key references its own table
        """
        if key.name not in self.get_column_names():
            raise TableKeyError(
                "Foreign key is missing from columns", self.name, key.name
            )
        if key.foreign_table_name == self.name:
            raise TableKeyError(
                "Foreign key references its own table", self.name, key.name
            )


class SchemaMissingTableError(Exception):
    """When a foreign key references a table that doesn't exist"""

    def __init__(
        self, foreign_key: str, table_name: str, foreign_table_name: str
    ) -> None:
        """
        Args:
            foreign_key (str): The name of the foreign key
            table_name (str): The name of the table that the key is in
            foreign_table_name (str): The name of the table the key refers to that is missing
        """
        self.message = "Foreign key references table which does not exist in schema."
        self.foreign_key = foreign_key
        self.table_name = table_name
        self.foreign_table_name = foreign_table_name
        super().__init__(self.message)

    def __str__(self) -> str:
        """String representation"""
        msg = (
            f"Foreign key '{self.foreign_key}' in table '{self.table_name}' references table "
            f"'{self.foreign_table_name}' which does not exist in schema."
        )
        return msg


class SchemaMissingColumnError(Exception):
    """When a foreign key references a table column the table doesn't have"""

    def __init__(
        self,
        foreign_key: str,
        table_name: str,
        foreign_table_name: str,
        foreign_table_column: str,
    ) -> None:
        """
        Args:
            foreign_key (str): The name of the foreign key
            table_name (str): The name of the table that the key is in
            foreign_table_name (str): The name of the table the key refers
            foreign_table_column (str): The column in the foreign table that is missing
        """
        self.message = "Foreign key references column which does not exist."
        self.foreign_key = foreign_key
        self.table_name = table_name
        self.foreign_table_name = foreign_table_name
        self.foreign_table_column = foreign_table_column
        super().__init__(self.message)

    def __str__(self) -> str:
        """String representation"""
        msg = (
            f"Foreign key '{self.foreign_key}' in table '{self.table_name}' references "
            f"column '{self.foreign_table_column}' which does not exist in table "
            f"'{self.foreign_table_name}'"
        )
        return msg


@dataclass
class DatabaseSchema:
    """A database agnostic schema"""

    table_schemas: list[TableSchema]

    def __post_init__(self) -> None:
        for schema in self.table_schemas:
            self._check_foreign_keys(schema)

    def __eq__(self, other: Any) -> bool:
        """Overrides the default implementation

        Args:
            other (Any): The object to compare against

        Returns:
            bool: Whether both objects have identical sorted table schemas
        """
        # Fix: comparing against an unrelated type used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # its default comparison instead.
        if not isinstance(other, DatabaseSchema):
            return NotImplemented
        return self.get_sorted_table_schemas() == other.get_sorted_table_schemas()

    def get_sorted_table_schemas(self) -> list[TableSchema]:
        """Gets the table schemas sorted by name

        Returns:
            list[TableSchema]: The list of sorted table schemas
        """
        return sorted(self.table_schemas, key=lambda x: x.name)

    def get_dependencies(self, table_name: str) -> list[str]:
        """Gets the tables dependencies

        Args:
            table_name (str): The name of the table

        Returns:
            list[str]: A list of tables names the table depends on
        """
        return self.get_schema_by_name(table_name).get_foreign_key_dependencies()

    def get_reverse_dependencies(self, table_name: str) -> list[str]:
        """Gets the names of the tables that depend on the input table

        Args:
            table_name (str): The name of the table

        Returns:
            list[str]: A list of table names that depend on the input table
        """
        return [
            schema.name
            for schema in self.table_schemas
            if table_name in schema.get_foreign_key_dependencies()
        ]

    def get_schema_names(self) -> list[str]:
        """Returns a list of names of the schemas

        Returns:
            list[str]: A list of names of the schemas
        """
        return [schema.name for schema in self.table_schemas]

    def get_schema_by_name(self, name: str) -> TableSchema:
        """Returns the schema

        Args:
            name (str): name of the schema

        Returns:
            TableSchema: The TableSchema asked for
        """
        return [schema for schema in self.table_schemas if schema.name == name][0]

    def _check_foreign_keys(self, schema: TableSchema) -> None:
        """Checks all foreign keys

        Args:
            schema (TableSchema): The schema of the table being checked
        """
        for key in schema.foreign_keys:
            self._check_foreign_key_table(schema, key)
            self._check_foreign_key_column(schema, key)

    def _check_foreign_key_table(
        self, schema: TableSchema, key: ForeignKeySchema
    ) -> None:
        """Checks that the table the foreign key refers to exists

        Args:
            schema (TableSchema): The schema for the table being checked
            key (ForeignKeySchema): The foreign key being checked

        Raises:
            SchemaMissingTableError: Raised when the table a foreign key
             references is missing
        """
        if key.foreign_table_name not in self.get_schema_names():
            raise SchemaMissingTableError(
                foreign_key=key.name,
                table_name=schema.name,
                foreign_table_name=key.foreign_table_name,
            )

    def _check_foreign_key_column(
        self, schema: TableSchema, key: ForeignKeySchema
    ) -> None:
        """Checks that the column the foreign key refers to exists

        Args:
            schema (TableSchema): The schema for the table being checked
            key (ForeignKeySchema): The foreign key being checked

        Raises:
            SchemaMissingColumnError: Raised when the column a foreign key
             references is missing
        """
        foreign_schema = self.get_schema_by_name(key.foreign_table_name)
        if key.foreign_column_name not in foreign_schema.get_column_names():
            raise SchemaMissingColumnError(
                foreign_key=key.name,
                table_name=schema.name,
                foreign_table_name=key.foreign_table_name,
                foreign_table_column=key.foreign_column_name,
            )
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/db_schema/db_schema.py
0.958158
0.371279
db_schema.py
pypi
# pylint: disable=duplicate-code
from typing import Any
from os import getenv
from datetime import datetime

import pytz
import requests
import pandas

from schematic_db.manifest_store.manifest_metadata_list import ManifestMetadataList


class SchematicAPIError(Exception):
    """When schematic API response status code is anything other than 200"""

    def __init__(  # pylint:disable=too-many-arguments
        self,
        endpoint_url: str,
        status_code: int,
        reason: str,
        time: datetime,
        params: dict[str, Any],
    ) -> None:
        """
        Args:
            endpoint_url (str): The url of the endpoint
            status_code (int): The status code given in the response
            reason (str): The reason given in the response
            time (datetime): The time the API was called
            params (dict[str, Any]): The parameters sent with the API call
        """
        self.message = "Error accessing Schematic endpoint"
        self.endpoint_url = endpoint_url
        self.status_code = status_code
        self.reason = reason
        self.time = time
        self.params = params
        super().__init__(self.message)

    def __str__(self) -> str:
        """
        Returns:
            str: The description of the error
        """
        return (
            f"{self.message}; "
            f"URL: {self.endpoint_url}; "
            f"Code: {self.status_code}; "
            f"Reason: {self.reason}; "
            f"Time (PST): {self.time}; "
            f"Parameters: {self.params}"
        )


class SchematicAPITimeoutError(Exception):
    """When schematic API timed out"""

    def __init__(
        self,
        endpoint_url: str,
        time: datetime,
        params: dict[str, Any],
    ) -> None:
        """
        Args:
            endpoint_url (str): The url of the endpoint
            time (datetime): The time the API was called
            params (dict[str, Any]): The parameters sent with the API call
        """
        self.message = "Schematic endpoint timed out"
        self.endpoint_url = endpoint_url
        self.time = time
        self.params = params
        super().__init__(self.message)

    def __str__(self) -> str:
        """
        Returns:
            str: The description of the error
        """
        return (
            f"{self.message}; "
            f"URL: {self.endpoint_url}; "
            f"Time (PST): {self.time}; "
            f"Parameters: {self.params}"
        )


def create_schematic_api_response(
    endpoint_path: str,
    params: dict[str, Any],
    timeout: int = 30,
) -> requests.Response:
    """Performs a GET request on the schematic API

    Args:
        endpoint_path (str): The path for the endpoint in the schematic API
        params (dict): The parameters in dict form for the requested endpoint
        timeout (int): The amount of seconds the API call has to run

    Raises:
        SchematicAPIError: When response code is anything other than 200
        SchematicAPITimeoutError: When API call times out

    Returns:
        requests.Response: The response from the API
    """
    api_url = getenv("API_URL", "https://schematic.api.sagebionetworks.org/v1/")
    # Fix: the default API_URL ends in "/", so the old f"{api_url}/{endpoint_path}"
    # join produced a double slash ("/v1//..."); strip it before joining.
    endpoint_url = f"{api_url.rstrip('/')}/{endpoint_path}"
    start_time = datetime.now(pytz.timezone("US/Pacific"))
    try:
        response = requests.get(endpoint_url, params=params, timeout=timeout)
    except requests.exceptions.Timeout as exc:
        raise SchematicAPITimeoutError(
            endpoint_url, start_time, filter_params(params)
        ) from exc
    if response.status_code != 200:
        raise SchematicAPIError(
            endpoint_url,
            response.status_code,
            response.reason,
            start_time,
            filter_params(params),
        )
    return response


def filter_params(params: dict[str, Any]) -> dict[str, Any]:
    """Removes any parameters from the input dictionary that should not be seen.

    Args:
        params (dict[str, Any]): A dictionary of parameters

    Returns:
        dict[str, Any]: A new dictionary of parameters with any secrets removed
    """
    secret_params = ["access_token"]
    # Fix: build a filtered copy rather than popping keys from the caller's
    # dictionary in place (the old behavior mutated the input as a side effect).
    return {key: value for key, value in params.items() if key not in secret_params}


def find_class_specific_properties(schema_url: str, schema_class: str) -> list[str]:
    """Find properties specifically associated with a given class

    Args:
        schema_url (str): Data Model URL
        schema_class (str): The class/name fo the component

    Returns:
        list[str]: A list of properties of a given class/component.
    """
    params = {"schema_url": schema_url, "schema_class": schema_class}
    response = create_schematic_api_response(
        "explorer/find_class_specific_properties", params
    )
    return response.json()


def get_property_label_from_display_name(
    schema_url: str, display_name: str, strict_camel_case: bool = True
) -> str:
    """Converts a given display name string into a proper property label string

    Args:
        schema_url (str): Data Model URL
        display_name (str): The display name to be converted
        strict_camel_case (bool, optional): If true the more strict way of
         converting to camel case is used. Defaults to True.

    Returns:
        str: the property label name
    """
    params = {
        "schema_url": schema_url,
        "display_name": display_name,
        "strict_camel_case": strict_camel_case,
    }
    response = create_schematic_api_response(
        "explorer/get_property_label_from_display_name", params
    )
    return response.json()


def get_graph_by_edge_type(schema_url: str, relationship: str) -> list[tuple[str, str]]:
    """Get a subgraph containing all edges of a given type (aka relationship)

    Args:
        schema_url (str): Data Model URL
        relationship (str): Relationship (i.e. parentOf, requiresDependency,
         rangeValue, domainValue)

    Returns:
        list[tuple[str, str]]: A subgraph in the form of a list of tuples.
    """
    params = {"schema_url": schema_url, "relationship": relationship}
    response = create_schematic_api_response("schemas/get/graph_by_edge_type", params)
    return response.json()


def get_project_manifests(
    access_token: str, project_id: str, asset_view: str
) -> ManifestMetadataList:
    """Gets all metadata manifest files across all datasets in a specified project.

    Args:
        access_token (str): access token
        project_id (str): Project ID
        asset_view (str): ID of view listing all project data assets. For example,
            for Synapse this would be the Synapse ID of the fileview listing all
            data assets for a given project.(i.e. master_fileview in config.yml)

    Returns:
        ManifestMetadataList: A list of manifests in Synapse
    """
    params = {
        "access_token": access_token,
        "project_id": project_id,
        "asset_view": asset_view,
    }
    # Long timeout: this endpoint walks every dataset in the project.
    response = create_schematic_api_response(
        "storage/project/manifests", params, timeout=1000
    )
    metadata_list = []
    # Each response item is a nested list: [(dataset_id, dataset_name),
    # (manifest_id, manifest_name), (component_name, ...)].
    for item in response.json():
        metadata_list.append(
            {
                "dataset_id": item[0][0],
                "dataset_name": item[0][1],
                "manifest_id": item[1][0],
                "manifest_name": item[1][1],
                "component_name": item[2][0],
            }
        )
    return ManifestMetadataList(metadata_list)


def download_manifest(access_token: str, manifest_id: str) -> pandas.DataFrame:
    """Downloads a manifest as a pd.dataframe

    Args:
        access_token (str): Access token
        manifest_id (str): The synapse id of the manifest

    Returns:
        pd.DataFrame: The manifest in dataframe form
    """
    params = {
        "access_token": access_token,
        "manifest_id": manifest_id,
        "as_json": True,
    }
    response = create_schematic_api_response("manifest/download", params, timeout=1000)
    manifest = pandas.DataFrame(response.json())
    return manifest


def is_node_required(schema_url: str, node_label: str) -> bool:
    """Checks if node is required

    Args:
        schema_url (str): Data Model URL
        node_label (str): Label/display name for the node to check

    Returns:
        bool: Whether or not the node is required
    """
    params = {"schema_url": schema_url, "node_display_name": node_label}
    response = create_schematic_api_response("schemas/is_node_required", params)
    return response.json()


def get_node_validation_rules(schema_url: str, node_display_name: str) -> list[str]:
    """Gets the validation rules for the node

    Args:
        schema_url (str): Data Model URL
        node_display_name (str): Label/display name for the node to check

    Returns:
        list[str]: A list of validation rules
    """
    params = {
        "schema_url": schema_url,
        "node_display_name": node_display_name,
    }
    response = create_schematic_api_response(
        "schemas/get_node_validation_rules", params
    )
    return response.json()
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/api_utils/api_utils.py
0.850407
0.195959
api_utils.py
pypi
import pandas as pd

from schematic_db.rdb.rdb import RelationalDatabase
from schematic_db.query_store.synapse_query_store import QueryStore


class DuplicateColumnError(Exception):
    """Occurs when a query results in a table with duplicate columns"""

    def __init__(self, table_name: str) -> None:
        """
        Args:
            table_name (str): The name of the offending table
        """
        self.table_name = table_name
        self.message = "Query result has duplicate columns"
        super().__init__(self.message)

    def __str__(self) -> str:
        """Human readable description of the error"""
        return f"{self.message}: {self.table_name}"


class RDBQueryer:
    """Queries a database and uploads the results to a query store."""

    def __init__(
        self,
        rdb: "RelationalDatabase",
        query_store: "QueryStore",
    ):
        """
        Args:
            rdb (RelationalDatabase): A relational database object to query
            query_store (QueryStore): A query store object that will store the
             results of the query
        """
        self.rdb = rdb
        self.query_store = query_store

    def store_query_results(self, csv_path: str) -> None:
        """Stores the results of queries

        Reads a csv file that must contain the columns "query" and
        "table_name", runs each query, and stores each result in the
        query_result_store as a table.

        Args:
            csv_path (str): A path to a csv file.
        """
        query_table = pd.read_csv(csv_path)
        for entry in query_table.itertuples(index=False):
            self.store_query_result(entry.query, entry.table_name)

    def store_query_result(self, query: str, table_name: str) -> None:
        """Stores the result of a single query

        Args:
            query (str): A query in SQL form
            table_name (str): The name of the table the result will be stored as

        Raises:
            DuplicateColumnError: Raised when the query result has duplicate columns
        """
        result = self.rdb.execute_sql_query(query)
        names = list(result.columns)
        if len(set(names)) != len(names):
            raise DuplicateColumnError(table_name)
        self.query_store.store_query_result(table_name, result)
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/rdb_queryer/rdb_queryer.py
0.860823
0.255564
rdb_queryer.py
pypi
from typing import Any

import numpy
import pandas
import sqlalchemy
import sqlalchemy.dialects.postgresql
from sqlalchemy.inspection import inspect
from sqlalchemy import exc

from schematic_db.db_schema.db_schema import ColumnDatatype
from .sql_alchemy_database import SQLAlchemyDatabase, SQLConfig
from .rdb import UpsertDatabaseError


class PostgresDatabase(SQLAlchemyDatabase):
    """PostgresDatabase
    - Represents a Postgres database.
    - Implements the RelationalDatabase interface.
    - Handles Postgres specific functionality.
    """

    def __init__(
        self,
        config: SQLConfig,
        verbose: bool = False,
    ):
        """Init

        Args:
            config (SQLConfig): A Postgres config
            verbose (bool): Sends much more to logging.info
        """
        super().__init__(config, verbose, "postgresql")
        # Extend the base class's generic type map with Postgres dialect types
        # so reflected columns translate back to generic ColumnDatatypes.
        column_datatypes = self.column_datatypes.copy()
        column_datatypes.update(
            {
                sqlalchemy.dialects.postgresql.base.TEXT: ColumnDatatype.TEXT,
                sqlalchemy.dialects.postgresql.base.VARCHAR: ColumnDatatype.TEXT,
                sqlalchemy.dialects.postgresql.base.INTEGER: ColumnDatatype.INT,
                sqlalchemy.dialects.postgresql.base.DOUBLE_PRECISION: ColumnDatatype.FLOAT,
                sqlalchemy.dialects.postgresql.base.FLOAT: ColumnDatatype.FLOAT,
                sqlalchemy.dialects.postgresql.base.DATE: ColumnDatatype.DATE,
            }
        )
        self.column_datatypes = column_datatypes

    def upsert_table_rows(self, table_name: str, data: pandas.DataFrame) -> None:
        """Inserts and/or updates the rows of the table

        Args:
            table_name (str): The name of the table to be upserted
            data (pandas.DataFrame): The rows to be upserted

        Raises:
            UpsertDatabaseError: Raised when a SQLAlchemy error caught
        """
        table = self._get_table_object(table_name)
        # NaN has no meaning in SQL; replace with None so it is sent as NULL.
        data = data.replace({numpy.nan: None})
        rows = data.to_dict("records")
        table_schema = self._get_current_metadata().tables[table_name]
        # NOTE(review): only the first primary-key column is used — assumes a
        # single-column primary key; confirm for composite-key tables.
        primary_key = inspect(table_schema).primary_key.columns.values()[0].name
        try:
            self._upsert_table_rows(rows, table, table_name, primary_key)
        except exc.SQLAlchemyError as exception:
            raise UpsertDatabaseError(table_name) from exception

    def _upsert_table_rows(
        self,
        rows: list[dict[str, Any]],
        table: sqlalchemy.Table,
        table_name: str,
        primary_key: str,
    ) -> None:
        """Upserts a pandas dataframe into a Postgres table

        Args:
            rows (list[dict[str, Any]]): A list of rows of a dataframe to be upserted
            table (sqlalchemy.Table): A sqlalchemy table entity to be upserted into
            table_name (str): The name of the table to be upserted into
            primary_key (str): The name of the primary key of the table being upserted into
        """
        statement = sqlalchemy.dialects.postgresql.insert(table).values(rows)
        # On conflict, update every column except the primary key with the
        # incoming ("excluded") values.
        update_columns = {
            col.name: col for col in statement.excluded if col.name != primary_key
        }
        # NOTE(review): assumes the primary-key constraint follows Postgres's
        # default "<table>_pkey" naming — confirm for tables with custom
        # constraint names.
        statement = statement.on_conflict_do_update(
            constraint=f"{table_name}_pkey", set_=update_columns
        )
        with self.engine.begin() as conn:
            conn.execute(statement)

    def query_table(self, table_name: str) -> pandas.DataFrame:
        """Queries a whole table

        Args:
            table_name (str): The name of the table to query

        Returns:
            pandas.DataFrame: The table in pandas.dataframe form
        """
        # Double-quote the name so case-sensitive or reserved-word table names
        # work in Postgres.
        query = f'SELECT * FROM "{table_name}"'
        return self.execute_sql_query(query)
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/rdb/postgres.py
0.79546
0.227995
postgres.py
pypi
from abc import ABC, abstractmethod

import pandas as pd

from schematic_db.db_schema.db_schema import TableSchema


class UpsertDatabaseError(Exception):
    """Raised when a database class catches an error doing an upsert"""

    def __init__(self, table_name: str) -> None:
        """
        Args:
            table_name (str): The name of the table being upserted into
        """
        self.table_name = table_name
        self.message = "Error upserting table"
        super().__init__(self.message)

    def __str__(self) -> str:
        """Human readable description of the error"""
        return f"{self.message}; Table Name: {self.table_name}"


class InsertDatabaseError(Exception):
    """Raised when a database class catches an error doing an insert"""

    def __init__(self, table_name: str) -> None:
        """
        Args:
            table_name (str): The name of the table being inserted into
        """
        self.table_name = table_name
        self.message = "Error inserting table"
        super().__init__(self.message)

    def __str__(self) -> str:
        """Human readable description of the error"""
        return f"{self.message}; Table Name: {self.table_name}"


class RelationalDatabase(ABC):
    """An interface for relational database types"""

    @abstractmethod
    def get_table_names(self) -> list[str]:
        """Lists the names of every table in the database

        Returns:
            list[str]: The table names
        """

    @abstractmethod
    def get_table_schema(self, table_name: str) -> TableSchema:
        """Builds a TableSchema describing the current database table

        Args:
            table_name (str): The name of the table

        Returns:
            TableSchema: The schema for the given table
        """

    @abstractmethod
    def execute_sql_query(self, query: str) -> pd.DataFrame:
        """Runs a valid SQL statement and returns its result.

        Use this when a result set is expected.

        Args:
            query (str): A SQL statement

        Returns:
            pd.DataFrame: The query result as a dataframe
        """

    @abstractmethod
    def query_table(self, table_name: str) -> pd.DataFrame:
        """Fetches an entire table

        Args:
            table_name (str): The name of the table

        Returns:
            pd.DataFrame: The whole table as a dataframe
        """

    @abstractmethod
    def add_table(self, table_name: str, table_schema: TableSchema) -> None:
        """Adds a table to the schema

        Args:
            table_name (str): The name of the table
            table_schema (TableSchema): The schema for the table being added
        """

    @abstractmethod
    def drop_table(self, table_name: str) -> None:
        """Drops a table from the schema

        Args:
            table_name (str): The id(name) of the table to be dropped
        """

    @abstractmethod
    def drop_all_tables(self) -> None:
        """Drops all tables from the database"""

    @abstractmethod
    def insert_table_rows(self, table_name: str, data: pd.DataFrame) -> None:
        """Inserts rows into the given table

        Args:
            table_name (str): The name of the table the rows will be inserted into
            data (pd.DataFrame): The rows; must contain the table's primary keys
        """

    @abstractmethod
    def upsert_table_rows(self, table_name: str, data: pd.DataFrame) -> None:
        """Inserts and/or updates rows in the given table

        Args:
            table_name (str): The name of the table the rows will be upserted into
            data (pd.DataFrame): The rows; must contain the table's primary keys
        """

    @abstractmethod
    def delete_table_rows(self, table_name: str, data: pd.DataFrame) -> None:
        """Deletes rows from the given table

        Args:
            table_name (str): The name of the table the rows will be deleted from
            data (pd.DataFrame): The rows; must contain the table's primary keys
        """
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/rdb/rdb.py
0.857872
0.349699
rdb.py
pypi
from typing import Any

import pandas
import numpy
import sqlalchemy
import sqlalchemy.dialects.mysql
from sqlalchemy import exc

from schematic_db.db_schema.db_schema import (
    ColumnDatatype,
    ColumnSchema,
)
from .sql_alchemy_database import SQLAlchemyDatabase, SQLConfig
from .rdb import UpsertDatabaseError


class MySQLDatabase(SQLAlchemyDatabase):
    """MySQLDatabase
    - Represents a mysql database.
    - Implements the RelationalDatabase interface.
    - Handles MYSQL specific functionality.
    """

    def __init__(
        self,
        config: SQLConfig,
        verbose: bool = False,
    ):
        """Init

        Args:
            config (SQLConfig): A MySQL config
            verbose (bool): Sends much more to logging.info
        """
        super().__init__(config, verbose, "mysql")
        # Extend the base class's generic type map with MySQL dialect types so
        # reflected columns translate back to generic ColumnDatatypes.
        column_datatypes = self.column_datatypes.copy()
        column_datatypes.update(
            {
                sqlalchemy.dialects.mysql.VARCHAR: ColumnDatatype.TEXT,
                sqlalchemy.dialects.mysql.TEXT: ColumnDatatype.TEXT,
                sqlalchemy.dialects.mysql.INTEGER: ColumnDatatype.INT,
                sqlalchemy.dialects.mysql.DOUBLE: ColumnDatatype.FLOAT,
                sqlalchemy.dialects.mysql.FLOAT: ColumnDatatype.FLOAT,
                sqlalchemy.dialects.mysql.DATE: ColumnDatatype.DATE,
            }
        )
        self.column_datatypes = column_datatypes

    def upsert_table_rows(self, table_name: str, data: pandas.DataFrame) -> None:
        """Inserts and/or updates the rows of the table

        Args:
            table_name (str): The name of the table to be upserted
            data (pandas.DataFrame): The rows to be upserted

        Raises:
            UpsertDatabaseError: Raised when a SQLAlchemy error caught
        """
        table = self._get_table_object(table_name)
        # NaN has no meaning in SQL; replace with None so it is sent as NULL.
        data = data.replace({numpy.nan: None})
        rows = data.to_dict("records")
        # Rows are upserted one statement at a time (unlike the Postgres
        # implementation, which sends a single multi-row statement).
        for row in rows:
            try:
                self._upsert_table_row(row, table, table_name)
            except exc.SQLAlchemyError as exception:
                raise UpsertDatabaseError(table_name) from exception

    def _upsert_table_row(
        self,
        row: dict[str, Any],
        table: sqlalchemy.Table,
        table_name: str,  # pylint: disable=unused-argument
    ) -> None:
        """Upserts a row into a MySQL table

        Args:
            row (dict[str, Any]): A row of a dataframe to be upserted
            table (sqlalchemy.Table): A sqlalchemy Table to be upserted into
            table_name (str): The name of the table to be upserted into (unused)
        """
        # INSERT ... ON DUPLICATE KEY UPDATE: inserts the row, or overwrites
        # every column with the new values when a key collision occurs.
        statement = sqlalchemy.dialects.mysql.insert(table).values(row)
        statement = statement.on_duplicate_key_update(**row)
        with self.engine.begin() as conn:
            conn.execute(statement)

    def _get_datatype(
        self, column_schema: ColumnSchema, primary_key: str, foreign_keys: list[str]
    ) -> Any:
        """
        Gets the datatype of the column based on its schema

        Args:
            column_schema (ColumnSchema): The schema of the column
            primary_key (str): The name of the table's primary key column
            foreign_keys (list[str]): A list of foreign keys for the the column

        Returns:
            Any: The SQLAlchemy datatype
        """
        datatypes = {
            ColumnDatatype.TEXT: sqlalchemy.VARCHAR(5000),
            ColumnDatatype.DATE: sqlalchemy.Date,
            ColumnDatatype.INT: sqlalchemy.Integer,
            ColumnDatatype.FLOAT: sqlalchemy.Float,
            ColumnDatatype.BOOLEAN: sqlalchemy.Boolean,
        }
        # Keys need to be max 100 chars
        if column_schema.datatype == ColumnDatatype.TEXT and (
            column_schema.name == primary_key or column_schema.name in foreign_keys
        ):
            return sqlalchemy.VARCHAR(100)
        # Strings that need to be indexed need to be max 1000 chars
        if column_schema.index and column_schema.datatype == ColumnDatatype.TEXT:
            return sqlalchemy.VARCHAR(1000)
        # Otherwise use datatypes dict
        return datatypes[column_schema.datatype]
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/rdb/mysql.py
0.825132
0.271916
mysql.py
pypi
# pylint: disable=duplicate-code
from typing import Any
import json
import re

from pydantic.dataclasses import dataclass
from pydantic import validator


@dataclass()
class ManifestMetadata:
    """Metadata for a manifest in Synapse."""

    dataset_id: str
    dataset_name: str
    manifest_id: str
    manifest_name: str
    component_name: str

    @validator("dataset_id", "manifest_id")
    @classmethod
    def validate_synapse_id(cls, value: str) -> str:
        """Ensure the value looks like a Synapse id ("syn" followed by digits).

        Args:
            value (str): A string

        Raises:
            ValueError: If the value isn't a valid Synapse id

        Returns:
            (str): The input value
        """
        if re.search("^syn[0-9]+", value) is None:
            raise ValueError(f"{value} is not a valid Synapse id")
        return value

    @validator("dataset_name", "manifest_name", "component_name")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Ensure the value has at least one character.

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if not value:
            raise ValueError(f"{value} is an empty string")
        return value

    def to_dict(self) -> dict[str, str]:
        """Returns object attributes as dict

        Returns:
            dict[str, str]: dict of object attributes
        """
        return {
            "dataset_id": self.dataset_id,
            "dataset_name": self.dataset_name,
            "manifest_id": self.manifest_id,
            "manifest_name": self.manifest_name,
            "component_name": self.component_name,
        }

    def __repr__(self) -> str:
        """Prints object as dict"""
        return json.dumps(self.to_dict(), indent=4)


class ManifestMetadataList:
    """A list of Manifest Metadata"""

    def __init__(self, metadata_input: list[dict[str, Any]]) -> None:
        """
        Args:
            metadata_input (list[dict[str, Any]]): A list of dicts where each
             dict has key values pairs that correspond to the arguments of
             ManifestMetadata. Entries that fail validation are skipped.
        """
        valid_entries: list[ManifestMetadata] = []
        for entry in list(metadata_input):
            try:
                valid_entries.append(ManifestMetadata(**entry))
            except ValueError:
                continue
        self.metadata_list = valid_entries

    def __repr__(self) -> str:
        """Prints each metadata object as dict"""
        dicts = [metadata.to_dict() for metadata in self.metadata_list]
        return json.dumps(dicts, indent=4)

    def get_dataset_ids_for_component(self, component_name: str) -> list[str]:
        """Gets the dataset ids from the manifest metadata matching the component name

        Args:
            component_name (str): The name of the component to get the manifest
             datasets ids for

        Returns:
            list[str]: A list of synapse ids for the manifest datasets
        """
        dataset_ids = []
        for metadata in self.metadata_list:
            if metadata.component_name == component_name:
                dataset_ids.append(metadata.dataset_id)
        return dataset_ids

    def get_manifest_ids_for_component(self, component_name: str) -> list[str]:
        """Gets the manifest ids from the manifest metadata matching the component name

        Args:
            component_name (str): The name of the component to get the manifest
             ids for

        Returns:
            list[str]: A list of synapse ids for the manifests
        """
        return [
            entry.manifest_id
            for entry in self.metadata_list
            if entry.component_name == component_name
        ]
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/manifest_store/manifest_metadata_list.py
0.897762
0.3214
manifest_metadata_list.py
pypi
from typing import Optional import pandas from deprecation import deprecated from schematic_db.schema_graph.schema_graph import SchemaGraph from schematic_db.api_utils.api_utils import ManifestMetadataList from schematic_db.synapse.synapse import Synapse from .manifest_store import ManifestStore, ManifestStoreConfig @deprecated( deprecated_in="0.0.29", details="This is both an experimental and temporary class that will be removed in the future.", ) class SynapseManifestStore(ManifestStore): """An interface for interacting with manifests""" def __init__(self, config: ManifestStoreConfig) -> None: """ Args: config (ManifestStoreConfig): A config with setup values """ self.synapse_asset_view_id = config.synapse_asset_view_id self.synapse = Synapse(config.synapse_auth_token, config.synapse_project_id) self.schema_graph = SchemaGraph(config.schema_url) self.manifest_metadata: Optional[ManifestMetadataList] = None def create_sorted_table_name_list(self) -> list[str]: """ Creates a table name list such tables always come after ones they depend on. This order is how tables in a database should be built and/or updated. 
Returns: list[str]: A list of tables names """ return self.schema_graph.create_sorted_table_name_list() def get_manifest_metadata(self) -> ManifestMetadataList: """Gets the current objects manifest metadata.""" query = ( "SELECT id, name, parentId, Component FROM " f"{self.synapse_asset_view_id} " "WHERE type = 'file' AND Component IS NOT NULL AND name LIKE '%csv'" ) dataframe = self.synapse.execute_sql_query(query) manifest_list = [] for _, row in dataframe.iterrows(): manifest_list.append( { "dataset_id": row["parentId"], "dataset_name": "none", "manifest_id": row["id"], "manifest_name": row["name"], "component_name": row["Component"], } ) return ManifestMetadataList(manifest_list) def get_manifest_ids(self, name: str) -> list[str]: """Gets the manifest ids for a table(component) Args: name (str): The name of the table Returns: list[str]: The manifest ids for the table """ return self.get_manifest_metadata().get_manifest_ids_for_component(name) def download_manifest(self, manifest_id: str) -> pandas.DataFrame: """Downloads the manifest Args: manifest_id (str): The synapse id of the manifest Returns: pandas.DataFrame: The manifest in dataframe form """ return self.synapse.download_csv_as_dataframe(manifest_id)
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/manifest_store/synapse_manifest_store.py
0.906457
0.156846
synapse_manifest_store.py
pypi
# pylint: disable=duplicate-code from typing import Optional import pandas from schematic_db.api_utils.api_utils import ( get_project_manifests, download_manifest, ManifestMetadataList, ) from schematic_db.schema_graph.schema_graph import SchemaGraph from .manifest_store import ManifestStore, ManifestStoreConfig class ManifestMissingPrimaryKeyError(Exception): """Raised when a manifest is missing its primary key""" def __init__( self, table_name: str, dataset_id: str, primary_key: str, manifest_columns: list[str], ): """ Args: table_name (str): The name of the table dataset_id (str): The dataset id for the component primary_key (str): The name of the primary key manifest_columns (list[str]): The columns in the manifest """ self.message = "Manifest is missing its primary key" self.table_name = table_name self.dataset_id = dataset_id self.primary_key = primary_key self.manifest_columns = manifest_columns super().__init__(self.message) def __str__(self) -> str: """String representation""" return ( f"{self.message}; table name:{self.table_name}; " f"dataset_id:{self.dataset_id}; primary keys:{self.primary_key}; " f"manifest columns:{self.manifest_columns}" ) class APIManifestStore(ManifestStore): """ The APIManifestStore class interacts with the Schematic API download manifests. """ def __init__(self, config: ManifestStoreConfig) -> None: """ The Schema class handles interactions with the schematic API. The main responsibilities are creating the database schema, and retrieving manifests. 
Args: config (SchemaConfig): A config describing the basic inputs for the schema object """ self.synapse_project_id = config.synapse_project_id self.synapse_asset_view_id = config.synapse_asset_view_id self.synapse_auth_token = config.synapse_auth_token self.schema_graph = SchemaGraph(config.schema_url) self.manifest_metadata: Optional[ManifestMetadataList] = None def create_sorted_table_name_list(self) -> list[str]: """ Uses the schema graph to create a table name list such tables always come after ones they depend on. This order is how tables in a database should be built and/or updated. Returns: list[str]: A list of tables names """ return self.schema_graph.create_sorted_table_name_list() def get_manifest_metadata(self) -> ManifestMetadataList: """Gets the manifest metadata Returns: ManifestMetadataList: the manifest metadata """ # When first initialized, manifest metadata is None if self.manifest_metadata is None: self.manifest_metadata = get_project_manifests( access_token=self.synapse_auth_token, project_id=self.synapse_project_id, asset_view=self.synapse_asset_view_id, ) assert self.manifest_metadata is not None return self.manifest_metadata def get_manifest_ids(self, name: str) -> list[str]: """Gets the manifest ids for a table(component) Args: name (str): The name of the table Returns: list[str]: The manifest ids for the table """ return self.get_manifest_metadata().get_manifest_ids_for_component(name) def download_manifest(self, manifest_id: str) -> pandas.DataFrame: """Downloads the manifest Args: manifest_id (str): The synapse id of the manifest Returns: pandas.DataFrame: The manifest in dataframe form """ manifest = download_manifest(self.synapse_auth_token, manifest_id) return manifest
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/manifest_store/api_manifest_store.py
0.891457
0.154663
api_manifest_store.py
pypi
from abc import ABC, abstractmethod import re import pandas from pydantic.dataclasses import dataclass from pydantic import validator import validators from schematic_db.api_utils.api_utils import ManifestMetadataList @dataclass() class ManifestStoreConfig: """ A config for a ManifestStore. Properties: schema_url (str): A url to the jsonld schema file synapse_project_id (str): The synapse id to the project where the manifests are stored. synapse_asset_view_id (str): The synapse id to the asset view that tracks the manifests. synapse_auth_token (str): A synapse token with download permissions for both the synapse_project_id and synapse_asset_view_id """ schema_url: str synapse_project_id: str synapse_asset_view_id: str synapse_auth_token: str @validator("schema_url") @classmethod def validate_url(cls, value: str) -> str: """Validates that the value is a valid URL""" valid_url = validators.url(value) if not valid_url: raise ValueError(f"{value} is a valid url") return value @validator("schema_url") @classmethod def validate_is_jsonld(cls, value: str) -> str: """Validates that the value is a jsonld file""" is_jsonld = value.endswith(".jsonld") if not is_jsonld: raise ValueError(f"{value} does end with '.jsonld'") return value @validator("synapse_project_id", "synapse_asset_view_id") @classmethod def validate_synapse_id(cls, value: str) -> str: """Check if string is a valid synapse id""" if not re.search("^syn[0-9]+", value): raise ValueError(f"{value} is not a valid Synapse id") return value @validator("synapse_auth_token") @classmethod def validate_string_is_not_empty(cls, value: str) -> str: """Check if string is not empty(has at least one char)""" if len(value) == 0: raise ValueError(f"{value} is an empty string") return value class ManifestStore(ABC): """An interface for interacting with manifests""" @abstractmethod def create_sorted_table_name_list(self) -> list[str]: """ Creates a table name list such tables always come after ones they depend on. 
This order is how tables in a database should be built and/or updated. Returns: list[str]: A list of tables names """ @abstractmethod def get_manifest_metadata(self) -> ManifestMetadataList: """Gets the current objects manifest metadata.""" @abstractmethod def get_manifest_ids(self, name: str) -> list[str]: """Gets the manifest ids for a table(component) Args: name (str): The name of the table Returns: list[str]: The manifest ids for the table """ @abstractmethod def download_manifest(self, manifest_id: str) -> pandas.DataFrame: """Downloads the manifest Args: manifest_id (str): The synapse id of the manifest Returns: pandas.DataFrame: The manifest in dataframe form """
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/manifest_store/manifest_store.py
0.852997
0.23105
manifest_store.py
pypi
from typing import Optional, Any from deprecation import deprecated from schematic_db.db_schema.db_schema import ( ForeignKeySchema, ColumnSchema, ColumnDatatype, ) DATATYPES = { "str": ColumnDatatype.TEXT, "float": ColumnDatatype.FLOAT, "int": ColumnDatatype.INT, "date": ColumnDatatype.DATE, } @deprecated( deprecated_in="0.0.27", details="Functionality will be accomplished with future Schematic API calls.", ) class DatabaseTableConfig: # pylint: disable=too-few-public-methods """A config for database specific items for one table""" def __init__( self, name: str, primary_key: Optional[str] = None, foreign_keys: Optional[list[dict[str, str]]] = None, columns: Optional[list[dict[str, Any]]] = None, ) -> None: """ Init """ self.name = name self.primary_key = primary_key if foreign_keys is None: self.foreign_keys = None else: self.foreign_keys = [ ForeignKeySchema( name=key["column_name"], foreign_table_name=key["foreign_table_name"], foreign_column_name=key["foreign_column_name"], ) for key in foreign_keys ] if columns is None: self.columns = None else: self.columns = [ ColumnSchema( name=column["column_name"], datatype=DATATYPES[column["datatype"]], required=column["required"], index=column["index"], ) for column in columns ] def _check_column_names(self) -> None: """Checks that column names are not duplicated Raises: ValueError: Raised when there are duplicate column names """ column_names = self._get_column_names() if column_names is not None: if len(column_names) != len(list(set(column_names))): raise ValueError("There are duplicate column names") def _get_column_names(self) -> Optional[list[str]]: """Gets the list of column names in the config Returns: list[str]: A list of column names """ if self.columns is not None: return [column.name for column in self.columns] return None def _check_foreign_key_name(self) -> None: """Checks that foreign keys are not duplicated Raises: ValueError: Raised when there are duplicate foreign keys """ foreign_keys_names = 
self._get_foreign_key_names() if foreign_keys_names is not None: if len(foreign_keys_names) != len(list(set(foreign_keys_names))): raise ValueError("There are duplicate column names") def _get_foreign_key_names(self) -> Optional[list[str]]: """Gets the list of foreign key names in the config Returns: list[str]: A list of foreign key names """ if self.foreign_keys is not None: return [key.name for key in self.foreign_keys] return None class DatabaseConfig: """A config for database specific items""" def __init__(self, tables: list[dict[str, Any]]) -> None: """ Init """ self.tables: list[DatabaseTableConfig] = [ DatabaseTableConfig(**table) for table in tables ] self._check_table_names() def get_primary_key(self, table_name: str) -> Optional[str]: """Gets the primary key for an table Args: table_name (str): The name of the table Returns: Optional[str]: The primary key """ table = self._get_table_by_name(table_name) return None if table is None else table.primary_key def get_foreign_keys(self, table_name: str) -> Optional[list[ForeignKeySchema]]: """Gets the foreign keys for an table Args: table_name (str): The name of the table Returns: Optional[list[ForeignKeySchema]]: The foreign keys """ table = self._get_table_by_name(table_name) return None if table is None else table.foreign_keys def get_columns(self, table_name: str) -> Optional[list[ColumnSchema]]: """Gets the columns for an table Args: table_name (str): The name of the table Returns: Optional[list[ColumnSchema]]: The list of columns """ table = self._get_table_by_name(table_name) return None if table is None else table.columns def get_column(self, table_name: str, column_name: str) -> Optional[ColumnSchema]: """Gets a column for a table Args: table_name (str): The name of the table to get the column for column_name (str): The name of the column to get Returns: Optional[list[ColumnSchema]]: The list of columns """ columns = self.get_columns(table_name) if columns is None: return None columns = [column for 
column in columns if column.name == column_name] if len(columns) == 0: return None return columns[0] def _get_table_by_name(self, table_name: str) -> Optional[DatabaseTableConfig]: """Gets the config for the table if it exists Args: table_name (str): The name of the table Returns: Optional[DatabaseTableConfig]: The config for the table if it exists """ tables = [table for table in self.tables if table.name == table_name] if len(tables) == 0: return None return tables[0] def _get_table_names(self) -> list[str]: """Gets the list of tables names in the config Returns: list[str]: A list of table names """ return [table.name for table in self.tables] def _check_table_names(self) -> None: """Checks that the table names are not duplicated Raises: ValueError: Raised when there are duplicate table names """ n_table_names = len(self._get_table_names()) n_unique_names = len(list(set(self._get_table_names()))) if n_table_names != n_unique_names: raise ValueError("There are duplicate table names")
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/schema/database_config.py
0.906129
0.255016
database_config.py
pypi
# pylint: disable=duplicate-code from typing import Optional import warnings from pydantic.dataclasses import dataclass from pydantic import validator import validators from schematic_db.db_schema.db_schema import ( DatabaseSchema, TableSchema, ForeignKeySchema, ColumnSchema, ColumnDatatype, ) from schematic_db.api_utils.api_utils import ( find_class_specific_properties, get_property_label_from_display_name, is_node_required, get_node_validation_rules, SchematicAPIError, SchematicAPITimeoutError, ) from schematic_db.schema_graph.schema_graph import SchemaGraph from .database_config import DatabaseConfig class NoColumnsWarning(Warning): """ Occurs when a database table has no columns returned from find_class_specific_properties(). """ def __init__(self, message: str) -> None: """ Args: message (str): A message describing the error """ self.message = message super().__init__(self.message) class MoreThanOneTypeRule(Exception): """Raised when an column has more than one validation type rule""" def __init__( self, column_name: str, type_rules: list[str], ): """ Args: column_name (str): The name of the column type_rules (list[str]): A list of the type rules """ self.message = "Attribute has more than one validation type rule" self.column_name = column_name self.type_rules = type_rules super().__init__(self.message) def __str__(self) -> str: return ( f"{self.message}; column name:{self.column_name}; " f"type_rules:{self.type_rules}" ) class ColumnSchematicError(Exception): """Raised when there is an issue getting data from the Schematic API for a column""" def __init__( self, column_name: str, table_name: str, ): """ Args: column_name (str): The name of the column table_name (str): The name of the table """ self.message = ( "There was an issue getting data from the Schematic API for the column" ) self.column_name = column_name self.table_name = table_name super().__init__(self.message) def __str__(self) -> str: return f"{self.message}: column name: {self.column_name}; 
table_name: {self.table_name}" @dataclass() class SchemaConfig: """ A config for a Schema. Properties: schema_url (str): A url to the jsonld schema file """ schema_url: str @validator("schema_url") @classmethod def validate_url(cls, value: str) -> str: """Validates that the value is a valid URL""" valid_url = validators.url(value) if not valid_url: raise ValueError(f"{value} is a valid url") return value @validator("schema_url") @classmethod def validate_is_jsonld(cls, value: str) -> str: """Validates that the value is a jsonld file""" is_jsonld = value.endswith(".jsonld") if not is_jsonld: raise ValueError(f"{value} does end with '.jsonld'") return value class Schema: """ The Schema class interacts with the Schematic API to create a DatabaseSchema table. """ def __init__( self, config: SchemaConfig, database_config: DatabaseConfig = DatabaseConfig([]), use_display_names_as_labels: bool = False, ) -> None: """ The Schema class handles interactions with the schematic API. The main responsibilities are creating the database schema, and retrieving manifests. Args: config (SchemaConfig): A config describing the basic inputs for the schema table database_config (DatabaseConfig): Experimental and will be deprecated in the near future. A config describing optional database specific columns. use_display_names_as_labels(bool): Experimental and will be deprecated in the near future. Use when display names and labels are the same in the schema. 
""" self.database_config = database_config self.schema_url = config.schema_url self.use_display_names_as_labels = use_display_names_as_labels self.schema_graph = SchemaGraph(config.schema_url) self.database_schema: Optional[DatabaseSchema] = None def get_database_schema(self) -> DatabaseSchema: """Gets the current database schema Returns: DatabaseSchema: the current database schema """ # When first initialized, database schema is None if self.database_schema is None: self.update_database_schema() assert self.database_schema is not None return self.database_schema def update_database_schema(self) -> None: """Updates the database schema.""" table_names = self.schema_graph.create_sorted_table_name_list() table_schemas = [ schema for schema in [self._create_table_schema(name) for name in table_names] if schema is not None ] self.database_schema = DatabaseSchema(table_schemas) def _create_table_schema(self, table_name: str) -> Optional[TableSchema]: """Creates the the schema for one table in the database, if any column schemas can be created. Args: table_name (str): The name of the table the schema will be created for. Returns: Optional[TableSchema]: The config for the table if the table has columns otherwise None. """ # Some components will not have any columns for various reasons columns = self._create_column_schemas(table_name) if not columns: return None return TableSchema( name=table_name, columns=columns, primary_key=self._get_primary_key(table_name), foreign_keys=self._get_foreign_keys(table_name), ) def _create_column_schemas( self, table_name: str, ) -> Optional[list[ColumnSchema]]: """Create the column schemas for the table, if any can be created. 
Args: table_name (str): The name of the table to create the column schemas for Returns: Optional[list[ColumnSchema]]: A list of columns in ColumnSchema form """ # the names of the columns to be created, in label(not display) form column_names = find_class_specific_properties(self.schema_url, table_name) columns = [ self._create_column_schema(name, table_name) for name in column_names ] # Some Tables will not have any columns for various reasons if not columns: warnings.warn( NoColumnsWarning( f"Table {table_name} has no columns, and will be skipped." ) ) return None return columns def _create_column_schema(self, column_name: str, table_name: str) -> ColumnSchema: """Creates a schema for column Args: column_name (str): The name of the column table_name (str): The name of the table Returns: ColumnSchema: The schema for the column """ column = self.database_config.get_column(table_name, column_name) # Use column config if provided if column is not None: return column # Create column config if not provided return ColumnSchema( name=column_name, datatype=self._get_column_datatype(column_name, table_name), required=self._is_column_required(column_name, table_name), index=False, ) def _is_column_required(self, column_name: str, table_name: str) -> bool: """Determines if the column is required in the schema Args: column_name (str): The name of the column table_name (str): The name of the table Raises: ColumnSchematicError: Raised when there is an issue with getting a result from the schematic API Returns: bool: Is the column required? 
""" try: is_column_required = is_node_required(self.schema_url, column_name) except (SchematicAPIError, SchematicAPITimeoutError) as exc: raise ColumnSchematicError(column_name, table_name) from exc return is_column_required def _get_column_datatype(self, column_name: str, table_name: str) -> ColumnDatatype: """Gets the datatype for the column Args: column_name (str): The name of the column table_name (str): The name of the table Raises: ColumnSchematicError: Raised when there is an issue with getting a result from the schematic API MoreThanOneTypeRule: Raised when the Schematic API returns more than one rule that indicate the columns datatype Returns: ColumnDatatype: The columns datatype """ datatypes = { "str": ColumnDatatype.TEXT, "float": ColumnDatatype.FLOAT, "num": ColumnDatatype.FLOAT, "int": ColumnDatatype.INT, "date": ColumnDatatype.DATE, } # Try to get validation rules from Schematic API try: all_validation_rules = get_node_validation_rules( self.schema_url, column_name ) except (SchematicAPIError, SchematicAPITimeoutError) as exc: raise ColumnSchematicError(column_name, table_name) from exc # Try to get type from validation rules type_validation_rules = [ rule for rule in all_validation_rules if rule in datatypes ] if len(type_validation_rules) > 1: raise MoreThanOneTypeRule(column_name, type_validation_rules) if len(type_validation_rules) == 1: return datatypes[type_validation_rules[0]] # Default to text if there are no validation type rules return ColumnDatatype.TEXT def _get_primary_key(self, table_name: str) -> str: """Get the primary key for the column Args: table_name (str): The name of the column Returns: str: The primary key of the column """ # Attempt to get the primary key from the config primary_key_attempt = self.database_config.get_primary_key(table_name) # Check if the primary key is in the config, otherwise assume "id" if primary_key_attempt is None: return "id" return primary_key_attempt def _get_foreign_keys(self, table_name: str) -> 
list[ForeignKeySchema]: """Gets a list of foreign keys for an table in the database Args: table_name (str): The name of the table the config will be created for. Returns: list[ForeignKeySchema]: A list of foreign keys for the table. """ # Attempt to get foreign keys from config foreign_keys_attempt = self.database_config.get_foreign_keys(table_name) # If there are no foreign keys in config use schema graph to create foreign keys if foreign_keys_attempt is None: return self._create_foreign_keys(table_name) return foreign_keys_attempt def _create_foreign_keys(self, table_name: str) -> list[ForeignKeySchema]: """Create a list of foreign keys an table in the database using the schema graph Args: table_name (str): The name of the table Returns: list[ForeignKeySchema]: A list of foreign """ # Uses the schema graph to find tables the current table depends on parent_table_names = self.schema_graph.get_neighbors(table_name) # Each parent of the current table needs a foreign key to that parent return [self._create_foreign_key(name) for name in parent_table_names] def _create_foreign_key(self, foreign_table_name: str) -> ForeignKeySchema: """Creates a foreign key table Args: foreign_table_name (str): The name of the table the foreign key is referring to. Returns: ForeignKeySchema: A foreign key table. 
""" # Assume the foreign key name is <table_name>_id where the table name is the # name of the table the column the foreign key is in column_name = self._get_column_name(f"{foreign_table_name}_id") attempt = self.database_config.get_primary_key(foreign_table_name) foreign_column_name = "id" if attempt is None else attempt return ForeignKeySchema(column_name, foreign_table_name, foreign_column_name) def _get_column_name(self, column_name: str) -> str: """Gets the column name of a manifest column Args: column_name (str): The name of the column Returns: str: The column name of the column """ if self.use_display_names_as_labels: return column_name return get_property_label_from_display_name(self.schema_url, column_name)
/schematic_db-0.0.31-py3-none-any.whl/schematic_db/schema/schema.py
0.90839
0.252511
schema.py
pypi
# Schematic [![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2FSage-Bionetworks%2Fschematic%2Fbadge%3Fref%3Ddevelop&style=flat)](https://actions-badge.atrox.dev/Sage-Bionetworks/schematic/goto?ref=develop) [![Documentation Status](https://readthedocs.org/projects/sage-schematic/badge/?version=develop)](https://sage-schematic.readthedocs.io/en/develop/?badge=develop) [![PyPI version](https://badge.fury.io/py/schematicpy.svg)](https://badge.fury.io/py/schematicpy) # Table of contents - [Introduction](#introduction) - [Installation](#installation) - [Installation Requirements](#installation-requirements) - [Installation guide for data curator app](#installation-guide-for-data-curator-app) - [Installation guide for developers/contributors](#installation-guide-for-developerscontributors) - [Other Contribution Guidelines](#other-contribution-guidelines) - [Update readthedocs documentation](#update-readthedocs-documentation) - [Command Line Usage](#command-line-usage) - [Testing](#testing) - [Updating Synapse test resources](#updating-synapse-test-resources) - [Code Style](#code-style) - [Contributors](#contributors) # Introduction SCHEMATIC is an acronym for _Schema Engine for Manifest Ingress and Curation_. The Python based infrastructure provides a _novel_ schema-based, metadata ingress ecosystem, that is meant to streamline the process of biomedical dataset annotation, metadata validation and submission to a data repository for various data contributors. # Installation ## Installation Requirements * Python version 3.9.0≤x<3.11.0 Note: You need to be a registered and certified user on [`synapse.org`](https://www.synapse.org/), and also have the right permissions to download the Google credentials files from Synapse. 
## Installation guide for data curator app Create and activate a virtual environment within which you can install the package: ``` python3 -m venv .venv source .venv/bin/activate ``` Note: Python 3 has a built-in support for virtual environment [venv](https://docs.python.org/3/library/venv.html#module-venv) so you no longer need to install virtualenv. Install and update the package using [pip](https://pip.pypa.io/en/stable/quickstart/): ``` python3 -m pip install schematicpy ``` If you run into error: Failed building wheel for numpy, the error might be able to resolve by upgrading pip. Please try to upgrade pip by: ``` pip3 install --upgrade pip ``` ## Installation guide for developers/contributors When contributing to this repository, please first discuss the change you wish to make via issue, email, or any other method with the owners of this repository before making a change. Please note we have a [code of conduct](CODE_OF_CONDUCT.md), please follow it in all your interactions with the project. ### Development environment setup 1. Clone the `schematic` package repository. ``` git clone https://github.com/Sage-Bionetworks/schematic.git ``` 2. Install `poetry` (version 1.2 or later) using either the [official installer](https://python-poetry.org/docs/#installing-with-the-official-installer) or [pipx](https://python-poetry.org/docs/#installing-with-pipx). If you have an older installation of Poetry, we recommend uninstalling it first. 3. Start the virtual environment by doing: ``` poetry shell ``` 4. Install the dependencies by doing: ``` poetry install ``` This command will install the dependencies based on what we specify in poetry.lock. If this step is taking a long time, try to go back to step 2 and check your version of poetry. 
Alternatively, you could also try deleting the lock file and regenerate it by doing `poetry install` (Please note this method should be used as a last resort because this would force other developers to change their development environment) 5. Fill in credential files: *Note*: If you won't interact with Synapse, please ignore this section. There are two main configuration files that need to be edited: config.yml and [synapseConfig](https://raw.githubusercontent.com/Sage-Bionetworks/synapsePythonClient/v2.3.0-rc/synapseclient/.synapseConfig) <strong>Configure .synapseConfig File</strong> Download a copy of the ``.synapseConfig`` file, open the file in the editor of your choice and edit the `username` and `authtoken` attribute under the `authentication` section *Note*: You could also visit [configparser](https://docs.python.org/3/library/configparser.html#module-configparser>) doc to see the format that `.synapseConfig` must have. For instance: >[authentication]<br> username = ABC <br> authtoken = abc <strong>Configure config.yml File</strong> There are some defaults in schematic that can be configured. These fields are in ``config_example.yml``: ```text # This is an example config for Schematic. # All listed values are those that are the default if a config is not used. # Save this as config.yml, this will be gitignored. # Remove any fields in the config you don't want to change # Change the values of any fields you do want to change # This describes where assets such as manifests are stored asset_store: # This is when assets are stored in a synapse project synapse: # Synapse ID of the file view listing all project data assets. 
master_fileview_id: "syn23643253" # Path to the synapse config file, either absolute or relative to this file config: ".synapseConfig" # Base name that manifest files will be saved as manifest_basename: "synapse_storage_manifest" # This describes information about manifests as it relates to generation and validation manifest: # Location where manifests will saved to manifest_folder: "manifests" # Title or title prefix given to generated manifest(s) title: "example" # Data types of manifests to be generated or data type (singular) to validate manifest against data_type: - "Biospecimen" - "Patient" # Describes the location of your schema model: # Location of your schema jsonld, it must be a path relative to this file or absolute location: "tests/data/example.model.jsonld" # This section is for using google sheets with Schematic google_sheets: # The Synapse id of the Google service account credentials. service_acct_creds_synapse_id: "syn25171627" # Path to the synapse config file, either absolute or relative to this file service_acct_creds: "schematic_service_account_creds.json" # When doing google sheet validation (regex match) with the validation rules. # true is alerting the user and not allowing entry of bad values. # false is warning but allowing the entry on to the sheet. strict_validation: true ``` If you want to change any of these copy ``config_example.yml`` to ``config.yml``, change any fields you want to, and remove any fields you don't. For example if you wanted to change the folder where manifests are downloaded your config should look like: ```text manifest: manifest_folder: "my_manifest_folder_path" ``` _Note_: `config.yml` is ignored by git. _Note_: Paths can be specified relative to the `config.yml` file or as absolute paths. 6. 
Login to Synapse by using the command line On the CLI in your virtual environment, run the following command: ``` synapse login -u <synapse username> -p <synapse password> --rememberMe ``` Please make sure that you run the command before running `schematic init` below 7. Obtain Google credential Files To obtain ``schematic_service_account_creds.json``, please run: ``` schematic init --config ~/path/to/config.yml ``` > As v22.12.1 version of schematic, using `token` mode of authentication (in other words, using `token.pickle` and `credentials.json`) is no longer supported due to Google's decision to move away from using OAuth out-of-band (OOB) flow. Click [here](https://developers.google.com/identity/protocols/oauth2/resources/oob-migration) to learn more. *Notes*: Use the ``schematic_service_account_creds.json`` file for the service account mode of authentication (*for Google services/APIs*). Service accounts are special Google accounts that can be used by applications to access Google APIs programmatically via OAuth2.0, with the advantage being that they do not require human authorization. *Background*: schematic uses Google’s API to generate google sheet templates that users fill in to provide (meta)data. Most Google sheet functionality could be authenticated with service account. However, more complex Google sheet functionality requires token-based authentication. As browser support that requires the token-based authentication diminishes, we are hoping to deprecate token-based authentication and keep only service account authentication in the future. ### Development process instruction For new features, bugs, enhancements 1. Pull the latest code from [develop branch in the upstream repo](https://github.com/Sage-Bionetworks/schematic) 2. Checkout a new branch develop-<feature/fix-name> from the develop branch 3. Do development on branch develop-<feature/fix-name> a. 
may need to ensure that schematic poetry toml and lock files are compatible with your local environment 4. Add changed files for tracking and commit changes using [best practices](https://www.perforce.com/blog/vcs/git-best-practices-git-commit) 5. Have granular commits: not “too many” file changes, and not hundreds of code lines of changes 6. Commits with work in progress are encouraged: a. add WIP to the beginning of the commit message for “Work In Progress” commits 7. Keep commit messages descriptive but less than a page long, see best practices 8. Push code to develop-<feature/fix-name> in upstream repo 9. Branch out off develop-<feature/fix-name> if needed to work on multiple features associated with the same code base 10. After feature work is complete and before creating a PR to the develop branch in upstream a. ensure that code runs locally b. test for logical correctness locally c. wait for git workflow to complete (e.g. tests are run) on github 11. Create a PR from develop-<feature/fix-name> into the develop branch of the upstream repo 12. Request a code review on the PR 13. Once code is approved merge in the develop branch 14. Delete the develop-<feature/fix-name> branch *Note*: Make sure you have the latest version of the `develop` branch on your local machine. ## Installation Guide - Docker 1. Install docker from https://www.docker.com/ . <br> 2. Identify docker image of interest from [Schematic DockerHub](https://hub.docker.com/r/sagebionetworks/schematic/tags) <br> Ex `docker pull sagebionetworks/schematic:latest` from the CLI or, run `docker compose up` after cloning the schematic github repo <br> in this case, `sagebionetworks/schematic:latest` is the name of the image chosen 3. Run Schematic Command with `docker run <flags> <schematic command and args>`. 
<br> <t> - For more information on flags for `docker run` and what they do, visit the [Docker Documentation](https://docs.docker.com/engine/reference/commandline/run/) <br> <t> - These example commands assume that you have navigated to the directory you want to run schematic from. To specify your working directory, use `$(pwd)` on MacOS/Linux or `%cd%` on Windows. <br> <t> - If not using the latest image, then the full name should be specified: ie `sagebionetworks/schematic:commit-e611e4a` <br> <t> - If using local image created by `docker compose up`, then the docker image name should be changed: i.e. `schematic_schematic` <br> <t> - Using the `--name` flag sets the name of the container running locally on your machine <br> ### Example For REST API <br> #### Use file path of `config.yml` to run API endpoints: ``` docker run --rm -p 3001:3001 \ -v $(pwd):/schematic -w /schematic --name schematic \ -e SCHEMATIC_CONFIG=/schematic/config.yml \ -e GE_HOME=/usr/src/app/great_expectations/ \ sagebionetworks/schematic \ python /usr/src/app/run_api.py ``` #### Use content of `config.yml` and `schematic_service_account_creds.json`as an environment variable to run API endpoints: 1. save content of `config.yml` as to environment variable `SCHEMATIC_CONFIG_CONTENT` by doing: `export SCHEMATIC_CONFIG_CONTENT=$(cat /path/to/config.yml)` 2. Similarly, save the content of `schematic_service_account_creds.json` as `SERVICE_ACCOUNT_CREDS` by doing: `export SERVICE_ACCOUNT_CREDS=$(cat /path/to/schematic_service_account_creds.json)` 3. 
Pass `SCHEMATIC_CONFIG_CONTENT` and `schematic_service_account_creds` as environment variables by using `docker run` ``` docker run --rm -p 3001:3001 \ -v $(pwd):/schematic -w /schematic --name schematic \ -e GE_HOME=/usr/src/app/great_expectations/ \ -e SCHEMATIC_CONFIG_CONTENT=$SCHEMATIC_CONFIG_CONTENT \ -e SERVICE_ACCOUNT_CREDS=$SERVICE_ACCOUNT_CREDS \ sagebionetworks/schematic \ python /usr/src/app/run_api.py ``` ### Example For Schematic on mac/linux <br> To run example below, first clone schematic into your home directory `git clone https://github.com/sage-bionetworks/schematic ~/schematic` <br> Then update .synapseConfig with your credentials ``` docker run \ -v ~/schematic:/schematic \ -w /schematic \ -e SCHEMATIC_CONFIG=/schematic/config.yml \ -e GE_HOME=/usr/src/app/great_expectations/ \ sagebionetworks/schematic schematic model \ -c /schematic/config.yml validate \ -mp /schematic/tests/data/mock_manifests/Valid_Test_Manifest.csv \ -dt MockComponent \ -js /schematic/tests/data/example.model.jsonld ``` ### Example For Schematic on Windows <br> ``` docker run -v %cd%:/schematic \ -w /schematic \ -e GE_HOME=/usr/src/app/great_expectations/ \ sagebionetworks/schematic \ schematic model \ -c config.yml validate -mp tests/data/mock_manifests/inValid_Test_Manifest.csv -dt MockComponent -js /schematic/data/example.model.jsonld ``` # Other Contribution Guidelines ## Updating readthedocs documentation 1. `cd docs` 2. After making relevant changes, you could run the `make html` command to re-generate the `build` folder. 3. Please contact the dev team to publish your updates *Other helpful resources*: 1. [Getting started with Sphinx](https://haha.readthedocs.io/en/latest/intro/getting-started-with-sphinx.html) 2. 
[Installing Sphinx](https://haha.readthedocs.io/en/latest/intro/getting-started-with-sphinx.html) ## Update toml file and lock file If you install external libraries by using `poetry add <name of library>`, please make sure that you include `pyproject.toml` and `poetry.lock` file in your commit. ## Reporting bugs or feature requests You can **create bug and feature requests** through [Sage Bionetwork's FAIR Data service desk](https://sagebionetworks.jira.com/servicedesk/customer/portal/5/group/8). Providing enough details to the developers to verify and troubleshoot your issue is paramount: - **Provide a clear and descriptive title as well as a concise summary** of the issue to identify the problem. - **Describe the exact steps which reproduce the problem** in as many details as possible. - **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. - **Explain which behavior you expected to see** instead and why. - **Provide screenshots of the expected or actual behaviour** where applicable. # Command Line Usage Please visit more documentation [here](https://sage-schematic.readthedocs.io/en/develop/cli_reference.html) # Testing All code added to the client must have tests. The Python client uses pytest to run tests. The test code is located in the [tests](https://github.com/Sage-Bionetworks/schematic/tree/develop-docs-update/tests) subdirectory. You can run the test suite in the following way: ``` pytest -vs tests/ ``` ## Updating Synapse test resources 1. Duplicate the entity being updated (or folder if applicable). 2. Edit the duplicates (_e.g._ annotations, contents, name). 3. Update the test suite in your branch to use these duplicates, including the expected values in the test assertions. 4. Open a PR as per the usual process (see above). 5. Once the PR is merged, leave the original copies on Synapse to maintain support for feature branches that were forked from `develop` before your update. 
- If the old copies are problematic and need to be removed immediately (_e.g._ contain sensitive data), proceed with the deletion and alert the other contributors that they need to merge the latest `develop` branch into their feature branches for their tests to work. # Code style * Please consult the [Google Python style guide](http://google.github.io/styleguide/pyguide.html) prior to contributing code to this project. * Be consistent and follow existing code conventions and spirit. # Contributors Main contributors and developers: - [Milen Nikolov](https://github.com/milen-sage) - [Mialy DeFelice](https://github.com/mialy-defelice) - [Sujay Patil](https://github.com/sujaypatil96) - [Bruno Grande](https://github.com/BrunoGrandePhD) - [Robert Allaway](https://github.com/allaway) - [Gianna Jordan](https://github.com/giajordan) - [Lingling Peng](https://github.com/linglp)
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/README.md
0.571527
0.94428
README.md
pypi
from typing import Any
from collections.abc import Iterable
from errno import ENOENT
from os import pathsep
from re import split

from pkg_resources import (
    resource_exists,
    resource_filename,
    resource_stream,
    resource_string,
    resource_listdir,
)


class InvalidResourceError(Exception):
    """Raised when a requested package resource does not exist or is malformed.

    Args:
        namespace: The package namespace the lookup was performed in.
        requested_uri: The URI which was requested within the given loader's
            namespace that did not exist or was malformed.
    """

    def __init__(self, namespace: str, requested_uri: str) -> None:
        self.namespace = namespace
        self.requested_uri = requested_uri
        self.message = "Resource does not exist or is declared incorrectly"
        # Mirror the OS "no such file or directory" error number for callers
        # that inspect `errno` on the exception.
        self.errno = ENOENT
        super().__init__(self.message)

    def __str__(self) -> str:
        return (
            f'{self.message}({self.errno}), "{self.requested_uri}" of {self.namespace}'
        )

    def __repr__(self) -> str:
        return self.__str__()


class Loader:
    """Resolve and read resources bundled inside a Python package.

    Args:
        namespace: The namespace within the package (relative to the package
            root) to load resources from. Using the magic variable ``__name__``
            is suggested as when the script is run as ``"__main__"`` it will
            load the most recent local resources instead of the cached egg
            resources.
        prefix: Set a prefix for all URIs. Use a prefix if resources are
            centrally located in a single place; the URIs will be prefixed
            automatically by the loader.
        local: When truthy, keep the namespace exactly as given instead of
            reducing it to the top-level package name.
    """

    def __init__(self, namespace: str, **opts: Any) -> None:
        self.namespace = namespace
        self.prefix = opts.get("prefix", "")
        self.local = opts.get("local", False)
        if not self.local:
            # Reduce e.g. "package.sub.module" (dotted or path-like) to the
            # top-level package name so resources resolve from the package root.
            self.namespace = split(r"\.|\\|\/", self.namespace)[0]

    def _resolve(self, uri: str) -> tuple[str, str]:
        """Return ``(namespace, resource_uri)`` for *uri*, validating existence.

        Raises:
            InvalidResourceError: If the resolved resource does not exist.
        """
        # NOTE(review): `os.pathsep` is ":" (";" on Windows), not the path
        # separator "/"; this split therefore only rewrites URIs containing
        # pathsep characters — confirm that is the intended behaviour.
        resource_uri = "/".join([self.prefix] + uri.split(pathsep))
        namespace = self.namespace
        if not resource_exists(namespace, resource_uri):
            raise InvalidResourceError(namespace, resource_uri)
        return namespace, resource_uri

    def read(self, uri: str) -> Any:
        """Read entire contents of resource. Same as ``open('path...').read()``.

        Args:
            uri: URI of the resource.
        """
        namespace, uri = self._resolve(uri)
        return resource_string(namespace, uri)

    def open(self, uri: str) -> Any:
        """Open a file-object-like handle to the resource. Same as ``open('path...')``.

        Args:
            uri: URI of the resource.
        """
        namespace, uri = self._resolve(uri)
        return resource_stream(namespace, uri)

    def filename(self, uri: str) -> str:
        """Return the "most correct" filename for a resource.

        Same as ``os.path.normpath('path...')``.

        Args:
            uri: URI of the resource.
        """
        namespace, uri = self._resolve(uri)
        return resource_filename(namespace, uri)

    def list(self, url: str) -> Iterable[str]:
        """Return a list of all resources within the given URL.

        Args:
            url: URL of the resources.
        """
        namespace, uri = self._resolve(url)
        # Lazy map: each child name is re-joined onto the *original* url.
        return map(lambda x: url + "/" + x, resource_listdir(namespace, uri))


# call Loader() and pass `schematic`, which is the global package namespace
LOADER = Loader("schematic", prefix="etc")
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/loader.py
0.864182
0.186873
loader.py
pypi
#!/usr/bin/env python3
# pylint: disable=line-too-long
# NOTE: the shebang must be the first line of the file to be effective; the
# original had the pylint pragma first.

"""Help text for the `schematic` command-line interface.

Each top-level dict maps a command group (``manifest``, ``model``, ``schema``,
``init``, ``visualization``) to the help strings of its options and
sub-commands, keeping all CLI help text in one place.
"""

# `schematic manifest` related sub-commands description
manifest_commands = {
    "manifest": {
        "config": (
            "Specify the path to the `config.yml` using this option. This is a required argument."
        ),
        "get": {
            # NOTE(review): this short_help duplicates the `config` text —
            # looks like a copy-paste; confirm intended wording upstream.
            "short_help": (
                "Specify the path to the `config.yml` using this option. "
                "This is a required argument."
            ),
            "title": (
                "Specify the title of the manifest (or title prefix of multiple manifests) that "
                "will be created at the end of the run. You can either explicitly pass the "
                "title of the manifest here or provide it in the `config.yml` "
                "file as a value for the `(manifest > title)` key."
            ),
            "data_type": (
                "Specify the component(s) (data type) from the data model that is to be used "
                "for generating the metadata manifest file. To make all available manifests enter 'all manifests'. "
                "You can either explicitly pass the data type here or provide "
                "it in the `config.yml` file as a value for the `(manifest > data_type)` key."
            ),
            "jsonld": (
                "Specify the path to the JSON-LD data model (schema) using this option. You can either explicitly pass the "
                "schema here or provide a value for the `(model > input > location)` key."
            ),
            "dataset_id": (
                # typo fix: "exisiting" -> "existing"
                "Specify the synID of a dataset folder on Synapse. If there is an existing manifest already present "
                "in that folder, then it will be pulled with the existing annotations for further annotation/modification. "
            ),
            "sheet_url": (
                "This is a boolean flag. If flag is provided when command line utility is executed, result will be a link/URL "
                "to the metadata manifest file. If not it will produce a pandas dataframe for the same."
            ),
            "output_csv": ("Path to where the CSV manifest template should be stored."),
            "output_xlsx": (
                "Path to where the Excel manifest template should be stored."
            ),
            "use_annotations": (
                "This is a boolean flag. If flag is provided when command line utility is executed, it will prepopulate template "
                "with existing annotations from Synapse."
            ),
            "json_schema": (
                "Specify the path to the JSON Validation Schema for this argument. "
                "You can either explicitly pass the `.json` file here or provide it in the `config.yml` file "
                "as a value for the `(model > location)` key."
            ),
            "alphabetize_valid_values": (
                # space added between the two sentences (literal concatenation
                # previously produced "(d).Optional")
                "Specify to alphabetize valid attribute values either ascending (a) or descending (d). "
                "Optional"
            ),
        },
        "migrate": {
            # NOTE(review): this short_help also duplicates the `config` text —
            # likely a copy-paste; confirm intended wording upstream.
            "short_help": (
                "Specify the path to the `config.yml` using this option. "
                "This is a required argument."
            ),
            "project_scope": (
                "Specify a comma-separated list of projects where manifest entities will be migrated to tables."
            ),
            "archive_project": (
                "Specify a single project where legacy manifest entities will be stored after migration to table."
            ),
            "return_entities": (
                "This is a boolean flag. If flag is provided when command line utility is executed, "
                "entities that have been transferred to an archive project will be returned to their original folders."
            ),
            "dry_run": (
                "This is a boolean flag. If flag is provided when command line utility is executed, "
                "a dry run will be performed. No manifests will be re-uploaded and no entities will be migrated, "
                "but archival folders will still be created. "
                "Migration information for testing purposes will be logged to the INFO level."
            ),
        },
    }
}

# `schematic model` related sub-commands description
model_commands = {
    "model": {
        "config": (
            "Specify the path to the `config.yml` using this option. This is a required argument."
        ),
        "submit": {
            "short_help": ("Validation (optional) and submission of manifest files."),
            "manifest_path": (
                "Specify the path to the metadata manifest file that you want to submit to a dataset on Synapse. "
                "This is a required argument."
            ),
            "dataset_id": (
                "Specify the synID of the dataset folder on Synapse to which you intend to submit "
                "the metadata manifest file. This is a required argument."
            ),
            "validate_component": (
                "The component or data type from the data model which you can use to validate the "
                "data filled in your manifest template."
            ),
            "use_schema_label": (
                # grammar fix: original read "must not only include characters
                # that are not accepted by Synapse" (double negative)
                "Store attributes using the schema label (--use_schema_label, default) or store attributes using the display label "
                "(--use_display_label). Attribute display names in the schema must only include characters that are "
                "accepted by Synapse. Annotation names may only contain: letters, numbers, '_' and '.'"
            ),
            "hide_blanks": (
                # typo fix: "Synaspe" -> "Synapse"; space added between sentences
                "This is a boolean flag. If flag is provided when command line utility is executed, annotations with blank values will be hidden from a dataset's annotation list in Synapse. "
                "If not, annotations with blank values will be displayed."
            ),
            "manifest_record_type": (
                # grammar fix ("store as on" -> "stored on"), typo fix
                # ("file_with_entites" -> "file_and_entities"), missing spaces added
                "Specify the way the manifest should be stored on Synapse. Options are 'file_only', 'file_and_entities', 'table_and_file' and "
                "'table_file_and_entities'. 'file_and_entities' will store the manifest as a csv and create Synapse files for each row in the manifest. "
                "'table_and_file' will store the manifest as a table and a csv on Synapse. "
                "'file_only' will store the manifest as a csv only on Synapse. "
                "'table_file_and_entities' will perform the options file_and_entities and table in combination. "
                "Default value is 'table_file_and_entities'."
            ),
            "table_manipulation": (
                # typo fixes: "exisitng" -> "existing", "manfiest" -> "manifest";
                # grammar fix ("store as on" -> "stored on"); missing spaces added
                "Specify the way the manifest tables should be stored on Synapse when one with the same name already exists. Options are 'replace' and 'upsert'. "
                "'replace' will remove the rows and columns from the existing table and store the new rows and columns, preserving the name and synID. "
                "'upsert' will add the new rows to the table and preserve the existing rows and columns in the existing table. "
                "Default value is 'replace'. "
                "Upsert specific requirements: {\n}"
                "'upsert' should be used for initial table uploads if users intend to upsert into them at a later time. "
                "Using 'upsert' at creation will generate the metadata necessary for upsert functionality. "
                "Upsert functionality requires primary keys to be specified in the data model and manifest as <component>_id. "
                "Currently it is required to use -dl/--use_display_label with table upserts."
            ),
        },
        "validate": {
            "short_help": ("Validation of manifest files."),
            "manifest_path": (
                "Specify the path to the metadata manifest file that you want to submit to a dataset on Synapse. "
                "This is a required argument."
            ),
            "data_type": (
                "Specify the component (data type) from the data model that is to be used "
                "for validating the metadata manifest file. You can either explicitly pass the data type here or provide "
                "it in the `config.yml` file as a value for the `(manifest > data_type)` key."
            ),
            "json_schema": (
                "Specify the path to the JSON Validation Schema for this argument. "
                "You can either explicitly pass the `.json` file here or provide it in the `config.yml` file "
                "as a value for the `(model > input > validation_schema)` key."
            ),
            "restrict_rules": (
                # space added between sentences
                "This is a boolean flag. If flag is provided when command line utility is executed, validation suite will only run with in-house validation rules, "
                "and Great Expectations rules and suite will not be utilized. "
                "If not, the Great Expectations suite will be utilized and all rules will be available."
            ),
            "project_scope": (
                "Specify a comma-separated list of projects to search through for cross manifest validation."
            ),
        },
    }
}

# `schematic schema` related sub-commands description
schema_commands = {
    "schema": {
        "convert": {
            "short_help": (
                "Convert specification from CSV data model to JSON-LD data model."
            ),
            "base_schema": (
                "Path to base data model. BioThings data model is loaded by default."
            ),
            "output_jsonld": (
                "Path to where the generated JSON-LD file needs to be outputted."
            ),
        }
    }
}

# `schematic init` command description
init_command = {
    "init": {
        "short_help": ("Initialize authentication for schematic."),
        "config": (
            "Specify the path to the `config.yml` using this option. This is a required argument."
        ),
    }
}

# `schematic visualization` related sub-commands description
viz_commands = {
    "visualization": {
        "config": (
            "Specify the path to the `config.yml` using this option. This is a required argument."
        ),
        "tangled_tree": {
            "figure_type": (
                "Specify the type of schema visualization to make. Either 'dependency' or 'component'."
            ),
            "text_format": (
                "Specify the type of text to gather for tangled tree visualization, either 'plain' or 'highlighted'."
            ),
        },
    }
}
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/help.py
0.74055
0.42483
help.py
pypi
from typing import Optional, Any, Sequence


class MissingConfigValueError(Exception):
    """Exception raised when a configuration value is not provided in the config file.

    Args:
        config_keys: Sequence of keys as present in the config file.
        message: Custom/pre-defined error message to be returned instead of
            the default one.
    """

    def __init__(
        self, config_keys: Sequence[Any], message: Optional[str] = None
    ) -> None:
        config_keys_str = " > ".join(config_keys)
        self.message = (
            "The configuration value corresponding to the argument "
            f"({config_keys_str}) doesn't exist. "
            "Please provide a value in the configuration file."
        )
        if message:
            self.message = message
        super().__init__(self.message)

    def __str__(self) -> str:
        return f"{self.message}"


class WrongEntityTypeError(Exception):
    """Exception raised when an entity is not of the desired type.

    Args:
        syn_id: Synapse ID of the entity whose type was wrong.
        message: Custom/pre-defined error message to be returned instead of
            the default one.
    """

    def __init__(self, syn_id: str, message: Optional[str] = None) -> None:
        # Fixed: original message had a stray doubled quote ("'{syn_id}''")
        # and the two sentences ran together with no separator.
        self.message = (
            f"'{syn_id}' is not a desired entity type. "
            "Please ensure that you put in the right syn_id"
        )
        if message:
            self.message = message
        super().__init__(self.message)

    def __str__(self) -> str:
        return f"{self.message}"


class MissingConfigAndArgumentValueError(Exception):
    """Exception raised when a value is provided neither on the CLI nor in the config file.

    Args:
        arg_name: CLI argument name.
        config_keys: Sequence of keys as present in the config file.
        message: Custom/pre-defined error message to be returned instead of
            the default one.
    """

    def __init__(
        self, arg_name: str, config_keys: Sequence[Any], message: Optional[str] = None
    ) -> None:
        config_keys_str = " > ".join(config_keys)
        self.message = (
            f"The value corresponding to the CLI argument '--{arg_name}'"
            " doesn't exist. "
            "Please provide a value for either the CLI argument or "
            f"({config_keys_str}) in the configuration file."
        )
        if message:
            self.message = message
        super().__init__(self.message)

    def __str__(self) -> str:
        return f"{self.message}"


class AccessCredentialsError(Exception):
    """Exception raised when provided access credentials cannot be resolved.

    Args:
        project: Platform/project (e.g., synID of a project).
        message: Custom/pre-defined error message to be returned instead of
            the default one.
    """

    def __init__(self, project: str, message: Optional[str] = None) -> None:
        # Fixed: original message had a stray doubled quote ("'{project}''").
        self.message = (
            f"Your access to '{project}' could not be resolved. "
            "Please check your credentials and try again."
        )
        if message:
            self.message = message
        super().__init__(self.message)

    def __str__(self) -> str:
        return f"{self.message}"
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/exceptions.py
0.94152
0.162879
exceptions.py
pypi
__author__ = "Jaakko Salonen" __copyright__ = "Copyright 2011-2012, Jaakko Salonen" __version__ = "0.5.0" __license__ = "MIT" __status__ = "Prototype" from urllib.parse import unquote from copy import copy from rdflib import BNode, URIRef import sys if sys.version_info.major == 3: unicode = str try: from rdflib import BNode, URIRef except: # Fallback if rdflib is not present class BNode(object): def __init__(self, val): self.val = val def n3(self): return unicode("_:" + self.val) class URIRef(unicode): pass class Curie(object): """Curie Datatype Class Examples: >>> nss = dict(dc='http://purl.org/dc/elements/1.1/') >>> dc_title = Curie('http://purl.org/dc/elements/1.1/title', nss) >>> dc_title.curie u'dc:title' >>> dc_title.uri u'http://purl.org/dc/elements/1.1/title' >>> dc_title.curie u'dc:title' >>> nss['example'] = 'http://www.example.org/' >>> iri_test = Curie('http://www.example.org/D%C3%BCrst', nss) >>> iri_test.uri u'http://www.example.org/D\\xfcrst' >>> iri_test.curie u'example:D%C3%BCrst' """ def __init__(self, uri, namespaces=dict()): self.namespaces = namespaces self.uri = unicode(unquote(uri), "utf-8") self.curie = copy(self.uri) for ns in self.namespaces: self.curie = uri.replace("" + self.namespaces["%s" % ns], "%s:" % ns) def __str__(self): return self.__unicode__() def __unicode__(self): return self.curie def uri2curie(uri, namespaces): """Convert URI to CURIE Define namespaces we want to use: >>> nss = dict(dc='http://purl.org/dc/elements/1.1/') Converting a string URI to CURIE >>> uri2curie('http://purl.org/dc/elements/1.1/title', nss) u'dc:title' RDFLib data type conversions: URIRef to CURIE >>> uri2curie(URIRef('http://purl.org/dc/elements/1.1/title'), nss) u'dc:title' Blank node to CURIE >>> uri2curie(BNode('blanknode1'), nss) u'_:blanknode1' """ # Use n3() method if BNode if isinstance(uri, BNode): result = uri.n3() else: result = uri # result = unicode(uri) for ns in namespaces: ns_raw = "%s" % namespaces["%s" % ns] if ns_raw == 
"http://www.w3.org/2002/07/owl#uri": ns_raw = "http://www.w3.org/2002/07/owl#" result = result.replace(ns_raw, "%s:" % ns) result = result.replace("http://www.w3.org/2002/07/owl#", "owl:") return result def curie2uri(curie, namespaces): """Convert CURIE to URI TODO: testing """ result = unicode(curie) for ns in namespaces: result = result.replace("%s:" % ns, "" + namespaces["%s" % ns]) return URIRef(result) if __name__ == "__main__": import doctest doctest.testmod()
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/schemas/curie.py
0.478285
0.160562
curie.py
pypi
import os
from jsonschema import validate

# NOTE(review): several of these imported helpers (load_default,
# validate_class_schema, validate_property_schema, validate_schema) are
# shadowed by methods below or unused in this chunk — confirm whether they
# are needed elsewhere in the file before removing.
from schematic.utils.io_utils import load_schemaorg, load_json, load_default
from schematic.utils.general import str2list, dict2list, find_duplicates
from schematic.utils.curie_utils import (
    expand_curies_in_schema,
    extract_name_from_uri_or_curie,
)
from schematic.utils.validate_utils import (
    validate_class_schema,
    validate_property_schema,
    validate_schema,
)


class SchemaValidator:
    """Validate Schema against SchemaOrg standard

    Validation Criterias:
    1. Data Structure wise:
      > "@id", "@context", "@graph"
      > Each element in "@graph" should contain "@id", "@type", "rdfs:comment",
        "rdfs:label", "sms:displayName"
      > validate against JSON Schema
      > Should validate the whole structure, and also validate property and
        value separately
    2. Data Content wise:
      > "@id" field should match with "rdfs:label" field
      > all prefixes used in the file should be defined in "@context"
      > There should be no duplicate "@id"
      > Class specific
        > rdfs:label field should be capitalize the first character of each
          word for a class;
        > the value of "rdfs:subClassOf" should be present in the schema or in
          the core vocabulary
        > sms:displayName ideally should contain capitalized words separated
          by space, but that's not enforced by validation
      > Property specific
        > rdfs:label field should be cammelCase
        > the value of "schema:domainIncludes" should be present in the schema
          or in the core vocabulary
        > the value of "schema:rangeIncludes" should be present in the schema
          or in the core vocabulary
        > sms:displayName ideally should contain capitalized words separated
          by space, but that's not enforced by validation

    Note: validation failures surface as AssertionError (bare asserts below),
    so callers should not run this under ``python -O``.

    TODO: add dependencies and component dependencies to class structure
    documentation; as well as value range and required property
    """

    def __init__(self, schema):
        # Index the reference schema.org vocabulary: collect the @id of every
        # class and property record so extension records can be checked
        # against them.
        self.schemaorg = {"schema": load_schemaorg(), "classes": [], "properties": []}
        for _schema in self.schemaorg["schema"]["@graph"]:
            for _record in _schema["@graph"]:
                if "@type" in _record:
                    _type = str2list(_record["@type"])
                    if "rdfs:Property" in _type:
                        self.schemaorg["properties"].append(_record["@id"])
                    elif "rdfs:Class" in _type:
                        self.schemaorg["classes"].append(_record["@id"])
        # Index the user-supplied extension schema the same way, after
        # expanding CURIEs to full identifiers.
        self.extension_schema = {
            "schema": expand_curies_in_schema(schema),
            "classes": [],
            "properties": [],
        }
        for _record in self.extension_schema["schema"]["@graph"]:
            _type = str2list(_record["@type"])
            if "rdfs:Property" in _type:
                self.extension_schema["properties"].append(_record["@id"])
            elif "rdfs:Class" in _type:
                self.extension_schema["classes"].append(_record["@id"])
        # Classes referenced by subclass/domain/range must come from either
        # vocabulary.
        self.all_classes = self.schemaorg["classes"] + self.extension_schema["classes"]

    def validate_class_label(self, label_uri):
        """Check if the first character of class label is capitalized"""
        label = extract_name_from_uri_or_curie(label_uri)
        assert label[0].isupper()

    def validate_property_label(self, label_uri):
        """Check if the first character of property label is lower case"""
        label = extract_name_from_uri_or_curie(label_uri)
        assert label[0].islower()

    def validate_subclassof_field(self, subclassof_value):
        """Check if the value of "subclassof" is included in the schema file"""
        subclassof_value = dict2list(subclassof_value)
        for record in subclassof_value:
            assert record["@id"] in self.all_classes

    def validate_domainIncludes_field(self, domainincludes_value):
        """Check if the value of "domainincludes" is included in the schema
        file
        """
        domainincludes_value = dict2list(domainincludes_value)
        for record in domainincludes_value:
            assert record["@id"] in self.all_classes, (
                "value of domainincludes not recorded in schema: %r"
                % domainincludes_value
            )

    def validate_rangeIncludes_field(self, rangeincludes_value):
        """Check if the value of "rangeincludes" is included in the schema
        file
        """
        rangeincludes_value = dict2list(rangeincludes_value)
        for record in rangeincludes_value:
            assert record["@id"] in self.all_classes

    def check_whether_atid_and_label_match(self, record):
        """Check if @id field matches with the "rdfs:label" field"""
        _id = extract_name_from_uri_or_curie(record["@id"])
        assert _id == record["rdfs:label"], "id and label not match: %r" % record

    def check_duplicate_labels(self):
        """Check for duplication in the schema"""
        labels = [
            _record["rdfs:label"]
            for _record in self.extension_schema["schema"]["@graph"]
        ]
        duplicates = find_duplicates(labels)
        # NOTE(review): the bare except converts the AssertionError into a
        # generic Exception carrying the duplicate set.
        try:
            assert len(duplicates) == 0
        except:
            raise Exception("Duplicates detected in graph: ", duplicates)

    def validate_schema(self, schema):
        """Validate schema against SchemaORG standard"""
        # Relative path: resolved from the current working directory.
        json_schema_path = os.path.join("validation_schemas", "schema.json")
        json_schema = load_json(json_schema_path)
        return validate(schema, json_schema)

    def validate_property_schema(self, schema):
        """Validate schema against SchemaORG property definition standard"""
        json_schema_path = os.path.join(
            "validation_schemas", "property_json_schema.json"
        )
        json_schema = load_json(json_schema_path)
        return validate(schema, json_schema)

    def validate_class_schema(self, schema):
        """Validate schema against SchemaORG class definition standard"""
        json_schema_path = os.path.join("validation_schemas", "class_json_schema.json")
        json_schema = load_json(json_schema_path)
        return validate(schema, json_schema)

    def validate_full_schema(self):
        """Run every per-record check over the extension schema's @graph."""
        self.check_duplicate_labels()
        for record in self.extension_schema["schema"]["@graph"]:
            self.check_whether_atid_and_label_match(record)
            if record["@type"] == "rdf:Class":
                self.validate_class_schema(record)
                self.validate_class_label(record["@id"])
            elif record["@type"] == "rdf:Property":
                self.validate_property_schema(record)
                self.validate_property_label(record["@id"])
                self.validate_domainIncludes_field(
                    record["http://schema.org/domainIncludes"]
                )
                if "http://schema.org/rangeIncludes" in record:
                    self.validate_rangeIncludes_field(
                        record["http://schema.org/rangeIncludes"]
                    )
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/schemas/validator.py
0.449634
0.323848
validator.py
pypi
import inspect import logging from typing import Any, Mapping, Sequence, Union, List from functools import reduce import re logger = logging.getLogger(__name__) # We are using fstrings in logger methods # pylint: disable=logging-fstring-interpolation def query_dict(dictionary: Mapping[Any, Any], keys: Sequence[Any]) -> Union[Any, None]: """Access a nested value in a dictionary corresponding to a series of keys. Args: dictionary: A dictionary containing anything. keys: A sequence of values corresponding to keys in `dictionary` Returns: The nested value corresponding to the given series of keys, or `None` is such a value doesn't exist. """ def extract(dictionary: Any, key: Any) -> Union[Any, None]: """Get value associated with key, defaulting to None.""" if dictionary is None or not isinstance(dictionary, dict): return None return dictionary.get(key) return reduce(extract, keys, dictionary) def log_value_from_config(arg_name: str, config_value: Any): """Logs when getting a value from the config Args: arg_name (str): Name of the argument. Used for logging. config_value (Any): The value in the config """ logger.info( f"The {arg_name} argument is being taken from configuration file, i.e., {config_value}." ) def parse_synIDs( ctx, param, synIDs, ) -> List[str]: """Parse and validate a comma separated string of synIDs Args: ctx: click option context param: click option argument name synIDs: comma separated string of synIDs Returns: List of synID strings Raises: ValueError: If the entire string does not match a regex for a valid comma separated string of SynIDs """ if synIDs: project_regex = re.compile("(syn\d+\,?)+") valid=project_regex.fullmatch(synIDs) if valid: synIDs = synIDs.split(",") return synIDs else: raise ValueError( f"The provided list of project synID(s): {synIDs}, is not formatted correctly. " "\nPlease check your list of projects for errors." 
) else: return def parse_comma_str_to_list( ctx, param, comma_string, ) -> List[str]: if comma_string: return comma_string.split(",") else: return None
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/utils/cli_utils.py
0.928676
0.346099
cli_utils.py
pypi
from io import StringIO import json import logging import networkx as nx import numpy as np import os from os import path import pandas as pd # allows specifying explicit variable types from typing import Any, Dict, Optional, Text, List from schematic.utils.viz_utils import visualize from schematic.visualization.attributes_explorer import AttributesExplorer from schematic.schemas.explorer import SchemaExplorer from schematic.schemas.generator import SchemaGenerator from schematic import LOADER from schematic.utils.io_utils import load_json from copy import deepcopy # Make sure to have newest version of decorator logger = logging.getLogger(__name__) #OUTPUT_DATA_DIR = str(Path('tests/data/visualization/AMPAD').resolve()) #DATA_DIR = str(Path('tests/data').resolve()) class TangledTree(object): """ """ def __init__(self, path_to_json_ld: str, figure_type: str, ) -> None: # Load jsonld self.path_to_json_ld = path_to_json_ld self.json_data_model = load_json(self.path_to_json_ld) # Parse schema name self.schema_name = path.basename(self.path_to_json_ld).split(".model.jsonld")[0] # Instantiate a schema generator to retrieve db schema graph from metadata model graph self.sg = SchemaGenerator(self.path_to_json_ld) # Get metadata model schema graph self.G = self.sg.se.get_nx_schema() # Set Parameters self.figure_type = figure_type.lower() self.dependency_type = ''.join(('requires', self.figure_type.capitalize())) # Get names self.schema = load_json(self.path_to_json_ld) self.schema_abbr = self.schema_name.split('_')[0] # Initialize AttributesExplorer self.ae = AttributesExplorer(self.path_to_json_ld) # Create output paths. self.text_csv_output_path = self.ae.create_output_path('text_csv') self.json_output_path = self.ae.create_output_path('tangled_tree_json') def strip_double_quotes(self, string): # Remove double quotes from beginning and end of string. 
if string.startswith('"') and string.endswith('"'): string = string[1:-1] # now remove whitespace string = "".join(string.split()) return string def get_text_for_tangled_tree(self, text_type, save_file=False): '''Gather the text that needs to be either higlighted or plain for the tangled tree visualization. Args: text_type (str): Choices = ['highlighted', 'plain'], determines the type of text rendering to return. save_file (bool): Determines if the outputs should be saved to disk or returned. Returns: If save_file==True: Saves plain or highlighted text as a CSV (to disk). save_file==False: Returns plain or highlighted text as a csv string. ''' # Get nodes in the digraph, many more nodes returned if figure type is dependency cdg = self.sg.se.get_digraph_by_edge_type(self.dependency_type) nodes = cdg.nodes() if self.dependency_type == 'requiresComponent': component_nodes = nodes else: # get component nodes if making dependency figure component_dg = self.sg.se.get_digraph_by_edge_type('requiresComponent') component_nodes = component_dg.nodes() # Initialize lists highlighted = [] plain = [] # For each component node in the tangled tree gather the plain and higlighted text. for node in component_nodes: # Get the highlighted components based on figure_type if self.figure_type == 'component': highlight_descendants = self.sg.se.get_descendants_by_edge_type(node, 'requiresComponent') elif self.figure_type == 'dependency': highlight_descendants = [node] # Format text to be higlighted and gather text to be formated plain. if not highlight_descendants: # If there are no highlighted descendants just highlight the selected node (format for observable.) highlighted.append([node, "id", node]) # Gather all the text as plain text. plain_descendants = [n for n in nodes if n != node] else: # Format higlighted text for Observable. for hd in highlight_descendants: highlighted.append([node, "id", hd]) # Gather the non-higlighted text as plain text descendants. 
plain_descendants = [node for node in nodes if node not in highlight_descendants] # Format all the plain text for observable. for nd in plain_descendants: plain.append([node, "id", nd]) # Prepare df depending on what type of text we need. df = pd.DataFrame(locals()[text_type.lower()], columns = ['Component', 'type', 'name']) # Depending on input either export csv locally to disk or as a string. if save_file==True: file_name = f"{self.schema_abbr}_{self.figure_type}_{text_type}.csv" df.to_csv(os.path.join(self.text_csv_output_path, file_name)) return elif save_file==False: return df.to_csv() def get_topological_generations(self): ''' Gather topological_gen, nodes and edges based on figure type. Outputs: topological_gen (List(list)):list of lists. Indicates layers of nodes. nodes: (Networkx NodeView) Nodes of the component or dependency graph. When iterated over it functions like a list. edges: (Networkx EdgeDataView) Edges of component or dependency graph. When iterated over it works like a list of tuples. ''' # Get nodes in the digraph digraph = self.sg.se.get_digraph_by_edge_type(self.dependency_type) nodes = digraph.nodes() # Get subgraph mm_graph = self.sg.se.get_nx_schema() subg = self.sg.get_subgraph_by_edge_type(mm_graph, self.dependency_type) # Get edges and topological_gen based on figure type. 
if self.figure_type == 'component': edges = digraph.edges() topological_gen = list(reversed(list(nx.topological_generations(subg)))) elif self.figure_type == 'dependency': rev_digraph = nx.DiGraph.reverse(digraph) edges = rev_digraph.edges() topological_gen = list(nx.topological_generations(subg)) return topological_gen, nodes, edges, subg def remove_unwanted_characters_from_conditional_statement(self, cond_req: str) -> str: '''Remove unwanted characters from conditional statement Example of conditional requirement: If File Format IS "BAM" OR "CRAM" OR "CSV/TSV" then Genome Build is required Example output: File Format IS "BAM" OR "CRAM" OR "CSV/TSV" ''' if "then" in cond_req: # remove everything after "then" cond_req_new = cond_req.split('then')[0] # remove "If" and empty space cond_req = cond_req_new.replace("If", "").lstrip().rstrip() return cond_req def get_ca_alias(self, conditional_requirements: list) -> dict: '''Get the alias for each conditional attribute. NOTE: Obtaining attributes(attr) and aliases(ali) in this function is specific to how formatting is set in AttributesExplorer. If that formatting changes, this section will likely break or in the worst case have a silent error. Input: conditional_requirements_list (list): list of strings of conditional requirements from outputs of AttributesExplorer. 
Output: ca_alias (dict): key: alias (attribute response) value: attribute ''' ca_alias = {} # clean up conditional requirements conditional_requirements = [self.remove_unwanted_characters_from_conditional_statement(req) for req in conditional_requirements] for i, req in enumerate(conditional_requirements): if "OR" not in req: attr, ali = req.split(' is ') attr = "".join(attr.split()) ali = self.strip_double_quotes(ali) ca_alias[ali] = attr else: attr, alias_str = req.split(' is ') alias_lst = alias_str.split(' OR ') for elem in alias_lst: elem = self.strip_double_quotes(elem) ca_alias[elem] = attr return ca_alias def gather_component_dependency_info(self, cn, attributes_df): '''Gather all component dependency information. Inputs: cn: (str) component name attributes_df: (Pandas DataFrame) Details for all attributes across all components. From AttributesExplorer. Outputs: conditional_attributes (list): List of conditional attributes for a particular component ca_alias (dict): key: alias (attribute response) value: attribute all_attributes (list): all attributes associated with a particular component. ''' # Gather all component dependency information component_attributes = self.sg.get_descendants_by_edge_type( cn, self.dependency_type, connected=True ) # Dont want to display `Component` in the figure so remove if 'Component' in component_attributes: component_attributes.remove('Component') # Gather conditional attributes so they can be added to the figure. 
if 'Cond_Req' in attributes_df.columns: conditional_attributes = list(attributes_df[(attributes_df['Cond_Req']==True) &(attributes_df['Component']==cn)]['Label']) ca_df = attributes_df[(attributes_df['Cond_Req']==True)&(attributes_df['Component']==cn)] conditional_requirements = list(attributes_df[(attributes_df['Cond_Req']==True) &(attributes_df['Component']==cn)]['Conditional Requirements']) ca_alias = self.get_ca_alias(conditional_requirements) else: # If there are no conditional attributes/requirements, initialize blank lists. conditional_attributes = [] ca_alias = {} # Gather a list of all attributes for the current component. all_attributes = list(np.append(component_attributes,conditional_attributes)) return conditional_attributes, ca_alias, all_attributes def find_source_nodes(self, nodes, edges, all_attributes=[]): '''Find all nodes in the graph that do not have a parent node. Inputs: nodes: (Networkx NodeView) Nodes of the component or dependency graph. When iterated over it functions like a list. edges: (Networkx EdgeDataView) Edges of component or dependency graph. When iterated over it works like a list of tuples. attributes_df: (Pandas DataFrame) Details for all attributes across all components. From AttributesExplorer. Outputs: source_nodes (list(str)): List of parentless nodes in ''' # Find edges that are not source nodes. not_source = [] for node in nodes: for edge_pair in edges: if node == edge_pair[0]: not_source.append(node) # Find source nodes as nodes that are not in not_source. source_nodes = [] for node in nodes: if self.figure_type == 'dependency': if node not in not_source and node in all_attributes: source_nodes.append(node) else: if node not in not_source: source_nodes.append(node) return source_nodes def get_parent_child_dictionary(self, nodes, edges, all_attributes=[]): '''Based on the dependency type, create dictionaries between parent and child and child and parent attributes. 
Input: nodes: (Networkx NodeView) Nodes of the component or dependency graph. edges: (Networkx EdgeDataView (component figure) or List(list) (dependency figure)) Edges of component or dependency graph. all_attributes: Output: child_parents (dict): key: child value: list of the childs parents parent_children (dict): key: parent value: list of the parents children ''' child_parents = {} parent_children = {} if self.dependency_type == 'requiresComponent': # Construct child_parents dictionary for edge in edges: # Add child as a key if edge[0] not in child_parents.keys(): child_parents[edge[0]] = [] # Add parents to list child_parents[edge[0]].append(edge[1]) # Construct parent_children dictionary for edge in edges: # Add parent as a key if edge[1] not in parent_children.keys(): parent_children[edge[1]] = [] # Add children to list parent_children[edge[1]].append(edge[0]) elif self.dependency_type == 'requiresDependency': # Construct child_parents dictionary for edge in edges: # Check if child is an attribute for the current component if edge[0] in all_attributes: # Add child as a key if edge[0] not in child_parents.keys(): child_parents[edge[0]] = [] # Add parent to list if it is an attriute for the current component if edge[1] in all_attributes: child_parents[edge[0]].append(edge[1]) # Construct parent_children dictionary for edge in edges: # Check if parent is an attribute for the current component if edge[1] in all_attributes: # Add parent as a key if edge[1] not in parent_children.keys(): parent_children[edge[1]] = [] # Add child to list if it is an attriute for the current component if edge[0] in all_attributes: parent_children[edge[1]].append(edge[0]) return child_parents, parent_children def alias_edges(self, ca_alias:dict, edges) -> List[list]: '''Create new edges based on aliasing between an attribute and its response. Purpose: Create aliased edges. 
For example: If BiospecimenType (attribute) is AnalyteBiospecimenType (response) Then ShippingConditionType (conditional requirement) is now required. In the model the edges that connect these options are: (AnalyteBiospecimenType, BiospecimenType) (ShippingConditionType, AnalyteBiospecimenType) Use alias defined in self.get_ca_alias along to define new edges that would directly link attributes to their conditional requirements, in this example the new edge would be: [ShippingConditionType, BiospecimenType] Inputs: ca_alias (dict): key: alias (attribute response) value: attribute edges (Networkx EdgeDataView): Edges of component or dependency graph. When iterated over it works like a list of tuples. Output: aliased_edges (List[lists]) of aliased edges. ''' aliased_edges = [] for i, edge in enumerate(edges): # construct one set of edges at a time edge_set = [] # If the first edge has an alias add alias to the first position in the current edge set if edge[0] in ca_alias.keys(): edge_set.append(ca_alias[edge[0]]) # Else add the non-aliased edge else: edge_set.append(edge[0]) # If the secod edge has an alias add alias to the first position in the current edge set if edge[1] in ca_alias.keys(): edge_set.append(ca_alias[edge[1]]) # Else add the non-aliased edge else: edge_set.append(edge[1]) # Add new edge set to a the list of aliased edges. aliased_edges.append(edge_set) return aliased_edges def prune_expand_topological_gen(self, topological_gen, all_attributes, conditional_attributes): ''' Purpose: Remake topological_gen with only relevant nodes. This is necessary since for the figure this function is being used in we only want to display a portion of the graph data. In addition to only displaying relevant nodes, we want to add conditional attributes to topological_gen so we can visualize them in the tangled tree as well. Input: topological_gen (List[list]): Indicates layers of nodes. all_attributes (list): all attributes associated with a particular component. 
conditional_attributes (list): List of conditional attributes for a particular component Output: new_top_gen (List[list]): mimics structure of topological_gen but only includes the nodes we want ''' pruned_topological_gen = [] # For each layer(gen) in the topological generation list for i, layer in enumerate(topological_gen): current_layer = [] next_layer = [] # For each node in the layer for node in layer: # If the node is relevant to this component and is not a conditional attribute add it to the current layer. if node in all_attributes and node not in conditional_attributes: current_layer.append(node) # If its a conditional attribute add it to a followup layer. if node in conditional_attributes: next_layer.append(node) # Added layers to new pruned_topological_gen list if current_layer: pruned_topological_gen.append(current_layer) if next_layer: pruned_topological_gen.append(next_layer) return pruned_topological_gen def get_base_layers(self, topological_gen, child_parents, source_nodes, cn): ''' Purpose: Reconfigure topological gen to move things back appropriate layers if they would have a back reference. The Tangle Tree figure requrires an acyclic directed graph that has additional layering rules between connected nodes. - If there is a backward connection then the line connecting them will break (this would suggest a cyclic connection.) - Additionally if two or more nodes are connecting to a downstream node it is best to put both parent nodes at the same level, if possible, to prevent line breaks. - Also want to move any children nodes one layer below the parent node(s). If there are multiple parents, put one layer below the parent that is furthest from the origin. This is an iterative process that needs to run twice to move all the nodes to their appropriate positions. Input: topological_gen: list of lists. Indicates layers of nodes. child_parents (dict): key: child value: list of the childs parents source_nodes: list, list of nodes that do not have a parent. 
cn: str, component name, default='' Output: base_layers: dict, key: component name, value: layer represents initial layering of toplogical_gen base_layers_copy_copy: dict, key: component name, value: layer represents the final layering after moving the components/attributes to their desired layer.c ''' # Convert topological_gen to a dictionary base_layers = {com:i for i, lev in enumerate(topological_gen) for com in lev} # Make another version to iterate on -- Cant set to equal or will overwrite the original. base_layers_copy = {com:i for i, lev in enumerate(topological_gen) for com in lev} # Move child nodes one node downstream of their parents. for level in topological_gen: for node in level: # Check if node has a parent. if node in child_parents.keys(): #node_level = base_layers[node] # Look at the parents for the node. parent_levels = [] for par in child_parents[node]: # Get the layer the parent is located at. parent_levels.append(base_layers[par]) # Get the max layer a parent of the node can be found. max_parent_level = max(parent_levels) # Move the node one layer beyond the max parent node position, so it will be downstream of its parents. base_layers_copy[node] = max_parent_level + 1 # Make another version of updated positions iterate on further. base_layers_copy_copy = base_layers_copy # Move parental source nodes if necessary. for level in topological_gen: for node in level: # Check if node has any parents. if node in child_parents.keys(): parent_levels = [] modify_par = [] # For each parent get their position. for par in child_parents[node]: parent_levels.append(base_layers_copy[par]) # If one of the parents is a source node move # it to the same level as the other nodes the child connects to so # that the connections will not be backwards (and result in a broken line) for par in child_parents[node]: # For a given parent determine if its a source node and that the parents # are not already at level 0, and the parent is not the current component node. 
if (par in source_nodes and (parent_levels.count(parent_levels[0]) != len(parent_levels)) and par != cn): # If so, remove its position from parent_levels parent_levels.remove(base_layers_copy[par]) # Add this parent to a list of parental positions to modify later. modify_par.append(par) # Get the new max parent level for this node. max_parent_level = max(parent_levels) # Move the node one position downstream of its max parent level. base_layers_copy_copy[node] = max_parent_level + 1 # For each parental position to modify, move the parents level up to the max_parent_level. for par in modify_par: base_layers_copy_copy[par] = max_parent_level return base_layers, base_layers_copy_copy def adjust_node_placement(self, base_layers_copy_copy, base_layers, topological_gen): '''Reorder nodes within topological_generations to match how they were ordered in base_layers_copy_copy Input: topological_gen: list of lists. Indicates layers of nodes. base_layers: dict, key: component name, value: layer represents initial layering of toplogical_gen base_layers_copy_copy: dict, key: component name, value: layer represents the final layering after moving the components/attributes to their desired layer. Output: topological_gen: same format but as the incoming topologial_gen but ordered to match base_layers_copy_copy. ''' if self.figure_type == 'component': # For each node get its new layer in the tangled tree for node, i in base_layers_copy_copy.items(): # Check if node is not already in the proper layer if node not in topological_gen[i]: # If not put it in the appropriate layer topological_gen[i].append(node) # Remove from inappropriate layer. 
topological_gen[base_layers[node]].remove(node) elif self.figure_type == 'dependency': for node, i in base_layers_copy_copy.items(): # Check if the location of the node is more than the number of # layers topological gen current handles if i > len(topological_gen) - 1: # If so, add node to new node at the end of topological_gen topological_gen.append([node]) # Remove the node from its previous position. topological_gen[base_layers[node]].remove(node) # Else, check if node is not already in the proper layer elif node not in topological_gen[i]: # If not put it in the appropriate layer topological_gen[i].append(node) # Remove from inappropriate layer. topological_gen[base_layers[node]].remove(node) return topological_gen def move_source_nodes_to_bottom_of_layer(self, node_layers, source_nodes): '''For aesthetic purposes move source nodes to the bottom of their respective layers. Input: node_layers (List(list)): Lists of lists of each layer and the nodes contained in that layer as strings. source_nodes (list): list of nodes that do not have a parent. Output: node_layers (List(list)): modified to move source nodes to the bottom of each layer. ''' for i, layer in enumerate(node_layers): nodes_to_move = [] for node in layer: if node in source_nodes: nodes_to_move.append(node) for node in nodes_to_move: node_layers[i].remove(node) node_layers[i].append(node) return node_layers def get_layers_dict_list(self, node_layers, child_parents, parent_children, all_parent_children): '''Convert node_layers to a list of lists of dictionaries that specifies each node and its parents (if applicable). Inputs: node_layers: list of lists of each layer and the nodes contained in that layer as strings. 
child_parents (dict): key: child value: list of the childs parents parent_children (dict): key: parent value: list of the parents children Outputs: layers_list (List(list): list of lists of dictionaries that specifies each node and its parents (if applicable) ''' num_layers = len(node_layers) layers_list = [[] for i in range(0, num_layers)] for i, layer in enumerate(node_layers): for node in layer: if node in child_parents.keys(): parents = child_parents[node] else: parents = [] if node in parent_children.keys(): direct_children = parent_children[node] else: direct_children = [] if node in all_parent_children.keys(): all_children = all_parent_children[node] else: all_children = [] layers_list[i].append({'id': node, 'parents': parents, 'direct_children': direct_children, 'children': all_children}) return layers_list def get_node_layers_json(self, topological_gen, source_nodes, child_parents, parent_children, cn='', all_parent_children=None): '''Return all the layers of a single tangled tree as a JSON String. Inputs: topological_gen:list of lists. Indicates layers of nodes. source_nodes: list of nodes that do not have a parent. child_parents (dict): key: child value: list of the childs parents parent_children (dict): key: parent value: list of the parents children all_parent_children (dict): key: parent value: list of the parents children (including all downstream nodes). Default to an empty dictionary Outputs: layers_json (JSON String): Layers of nodes in the tangled tree as a json string. ''' base_layers, base_layers_copy_copy = self.get_base_layers(topological_gen, child_parents, source_nodes, cn) # Rearrange node_layers to follow the pattern laid out in component layers. node_layers = self.adjust_node_placement(base_layers_copy_copy, base_layers, topological_gen) # Move source nodes to the bottom of each layer. 
node_layers = self.move_source_nodes_to_bottom_of_layer(node_layers, source_nodes) # Convert layers to a list of dictionaries if not all_parent_children: # default to an empty dictionary all_parent_children = dict() layers_dicts = self.get_layers_dict_list(node_layers, child_parents, parent_children, all_parent_children) # Convert dictionary to a JSON string layers_json = json.dumps(layers_dicts) return layers_json def save_outputs(self, save_file, layers_json, cn='', all_layers=[]): ''' Inputs: save_file (bool): Indicates whether to save a file locally or not.: layers_json (JSON String): Layers of nodes in the tangled tree as a json string. cn (str): component name, default='' all_layers (list of json strings): Each string represents contains the layers for a single tangled tree. If a dependency figure the list is added to each time this function is called, so starts incomplete. default=[]. Outputs: all_layers (list of json strings): If save_file == False: Each string represents contains the layers for a single tangled tree. If save_file ==True: is an empty list. 
''' if save_file == True: if cn: output_file_name = f"{self.schema_abbr}_{self.figure_type}_{cn}_tangled_tree.json" else: output_file_name = f"{self.schema_abbr}_{self.figure_type}_tangled_tree.json" with open(os.path.join(self.json_output_path, output_file_name), 'w') as outfile: outfile.write(layers_json) logger.info(f"Tangled Tree JSON String saved to {os.path.join(self.json_output_path, output_file_name)}.") all_layers = layers_json elif save_file == False: all_layers.append(layers_json) return all_layers def get_ancestors_nodes(self, subgraph, components): """ Inputs: subgraph: networkX graph object components: a list of nodes outputs: all_parent_children: a dictionary that indicates a list of children (including all the intermediate children) of a given node """ all_parent_children = {} for component in components: all_ancestors = self.sg.se.get_nodes_ancestors(subgraph, component) all_parent_children[component] = all_ancestors return all_parent_children def get_tangled_tree_layers(self, save_file=True): '''Based on user indicated figure type, construct the layers of nodes of a tangled tree. Inputs: save_file (bool): Indicates whether to save a file locally or not. Outputs: all_layers (list of json strings): If save_file == False: Each string represents contains the layers for a single tangled tree. If save_file ==True: is an empty list. 
Note on Dependency Tangled Tree: If there are many conditional requirements associated with a depependency, and those conditional requirements have overlapping attributes associated with them the tangled tree will only report one ''' # Gather the data model's, topological generations, nodes and edges topological_gen, nodes, edges, subg = self.get_topological_generations() if self.figure_type == 'component': # Gather all source nodes source_nodes = self.find_source_nodes(nodes, edges) # Map all children to their parents and vice versa child_parents, parent_children = self.get_parent_child_dictionary(nodes, edges) # find all the downstream nodes all_parent_children = self.get_ancestors_nodes(subg, parent_children.keys()) # Get the layers that each node belongs to. layers_json = self.get_node_layers_json(topological_gen, source_nodes, child_parents, parent_children, all_parent_children=all_parent_children) # If indicated save outputs locally else gather all layers. all_layers = self.save_outputs(save_file, layers_json) if self.figure_type == 'dependency': # Get component digraph and nodes. component_dg = self.sg.se.get_digraph_by_edge_type('requiresComponent') component_nodes = component_dg.nodes() # Get table of attributes. attributes_csv_str = self.ae.parse_attributes(save_file=False) attributes_df = pd.read_table(StringIO(attributes_csv_str), sep=",") all_layers =[] for cn in component_nodes: # Gather attribute and dependency information per node conditional_attributes, ca_alias, all_attributes = self.gather_component_dependency_info(cn, attributes_df) # Gather all source nodes source_nodes = self.find_source_nodes(component_nodes, edges, all_attributes) # Alias the conditional requirement edge back to its actual parent label, # then apply aliasing back to the edges aliased_edges = self.alias_edges(ca_alias, edges) # Gather relationships between children and their parents. 
child_parents, parent_children = self.get_parent_child_dictionary(nodes, aliased_edges, all_attributes) # Remake topological_gen so it has only relevant nodes. pruned_topological_gen = self.prune_expand_topological_gen(topological_gen, all_attributes, conditional_attributes) # Get the layers that each node belongs to. layers_json = self.get_node_layers_json(pruned_topological_gen, source_nodes, child_parents, parent_children, cn) # If indicated save outputs locally else, gather all layers. all_layers = self.save_outputs(save_file, layers_json, cn, all_layers) return all_layers
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/visualization/tangled_tree.py
0.76895
0.264103
tangled_tree.py
pypi
import gc
import json
import logging
import os
from typing import Any, Dict, Optional, Text, List

import numpy as np
import pandas as pd

from schematic.schemas import SchemaGenerator
from schematic.utils.io_utils import load_json

logger = logging.getLogger(__name__)


class AttributesExplorer():
    """Parse a JSON-LD data model into per-component attribute tables.

    The tables (one row per attribute: label, description, requiredness,
    valid values, conditional requirements, ...) feed the Observable
    visualizations and can be saved as CSV or returned as a CSV string.
    """

    def __init__(self, path_to_jsonld: str) -> None:
        """
        Args:
            path_to_jsonld (str): Path to the ``*.model.jsonld`` data model file.
        """
        self.path_to_jsonld = path_to_jsonld
        self.json_data_model = load_json(self.path_to_jsonld)
        self.jsonld = load_json(self.path_to_jsonld)

        # instantiate a schema generator to retrieve db schema graph from
        # metadata model graph
        self.sg = SchemaGenerator(self.path_to_jsonld)

        self.output_path = self.create_output_path('merged_csv')

    def create_output_path(self, terminal_folder: str) -> str:
        '''Create output path to store Observable visualization data if it
        does not already exist.

        Args:
            terminal_folder (str): Name of the last folder in the output path.
        Returns:
            output_path (str): path to store outputs
        '''
        base_dir = os.path.dirname(self.path_to_jsonld)
        # NOTE(review): splitting on '/' assumes POSIX-style paths — confirm
        # Windows paths are not expected here.
        self.schema_name = self.path_to_jsonld.split('/')[-1].split('.model.jsonld')[0]
        output_path = os.path.join(base_dir, 'visualization', self.schema_name, terminal_folder)
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        return output_path

    def convert_string_cols_to_json(self, df: pd.DataFrame, cols_to_modify: list) -> pd.DataFrame:
        """Converts values in a column from strings to JSON list
        for upload to Synapse.

        Args:
            df (pd.DataFrame): dataframe whose columns may be converted.
            cols_to_modify (list): names of the columns to convert.
        Returns:
            pd.DataFrame: the same dataframe with listed columns converted.
        """
        for col in df.columns:
            if col in cols_to_modify:
                # `x == x` is False only for NaN, so missing values are left
                # untouched. (The previous `x == np.nan` comparison is always
                # False — NaN never compares equal — so nothing was ever
                # converted; this restores the intended behavior.)
                df[col] = df[col].apply(
                    lambda x: json.dumps([y.strip() for y in x])
                    if x != "NaN" and x and x == x
                    else x
                )
        return df

    def parse_attributes(self, save_file: bool = True):
        '''Parse attributes for all components in the data model.

        Args:
            save_file (bool):
                True: merged_df is saved locally to output_path.
                False: merged_df is returned.
        Returns:
            merged_df (pd.DataFrame): dataframe containing data relating to
                attributes for the provided data model for all components in
                the data model. Dataframe is saved locally as a csv if
                save_file == True, or returned if save_file == False.
        '''
        # get all components
        component_dg = self.sg.se.get_digraph_by_edge_type('requiresComponent')
        components = component_dg.nodes()

        # For each data type to be loaded gather all attributes the user
        # would have to provide.
        return self._parse_attributes(components, save_file)

    def parse_component_attributes(self, component=None, save_file: bool = True,
                                   include_index: bool = True):
        '''Parse attributes for a single component in the data model.

        Args:
            component (str): the component to parse attributes for.
            save_file (bool):
                True: merged_df is saved locally to output_path.
                False: merged_df is returned.
            include_index (bool): Whether to include the index in the returned
                dataframe (True) or not (False)
        Returns:
            merged_df (pd.DataFrame): dataframe containing data relating to
                attributes for the provided data model for the specified
                component in the data model. Dataframe is saved locally as a
                csv if save_file == True, or returned if save_file == False.
        Raises:
            ValueError: If no component is provided.
        '''
        if not component:
            raise ValueError("You must provide a component to visualize.")
        return self._parse_attributes([component], save_file, include_index)

    def _parse_attributes(self, components, save_file: bool = True,
                          include_index: bool = True):
        '''Gather attribute details for the given components.

        Args:
            components (list): list of components to parse attributes for
            save_file (bool):
                True: merged_df is saved locally to output_path.
                False: merged_df is returned.
            include_index (bool): Whether to include the index in the returned
                dataframe (True) or not (False)
        Returns:
            merged_df (pd.DataFrame): dataframe containing data relating to
                attributes for the provided data model for specified
                components in the data model. Dataframe is saved locally as a
                csv if save_file == True, or returned if save_file == False.
        Raises:
            ValueError: If an error is hit while attempting to get conditional
                requirements. This error is likely to be found if there is a
                mismatch in naming.
        '''
        # For each data type to be loaded gather all attributes the user
        # would have to provide.
        df_store = []
        for component in components:
            data_dict = {}

            # get the json schema
            json_schema = self.sg.get_json_schema_requirements(
                source_node=component, schema_name=self.path_to_jsonld)

            # Gather all attributes, their valid values and requirements
            for key, value in json_schema['properties'].items():
                data_dict[key] = {}
                for k, v in value.items():
                    if k == 'enum':
                        data_dict[key]['Valid Values'] = value['enum']
                if key in json_schema['required']:
                    data_dict[key]['Required'] = True
                else:
                    data_dict[key]['Required'] = False
                data_dict[key]['Component'] = component

            # Add additional details per key (from the JSON-ld)
            for dic in self.jsonld['@graph']:
                if 'sms:displayName' in dic.keys():
                    key = dic['sms:displayName']
                    if key in data_dict.keys():
                        data_dict[key]['Attribute'] = dic['sms:displayName']
                        data_dict[key]['Label'] = dic['rdfs:label']
                        data_dict[key]['Description'] = dic['rdfs:comment']
                        if 'validationRules' in dic.keys():
                            data_dict[key]['Validation Rules'] = dic['validationRules']

            # Find conditional dependencies
            if 'allOf' in json_schema.keys():
                for conditional_dependencies in json_schema['allOf']:
                    key = list(conditional_dependencies['then']['properties'])[0]
                    try:
                        if key in data_dict.keys():
                            if 'Cond_Req' not in data_dict[key].keys():
                                data_dict[key]['Cond_Req'] = []
                                data_dict[key]['Conditional Requirements'] = []
                            attribute = list(conditional_dependencies['if']['properties'])[0]
                            value = conditional_dependencies['if']['properties'][attribute]['enum']
                            # Capitalize attribute if it begins with a
                            # lowercase letter, for aesthetics.
                            if attribute[0].islower():
                                attribute = attribute.capitalize()
                            # Remove "Type" (i.e. turn "Biospecimen Type" to
                            # "Biospecimen")
                            if "Type" in attribute:
                                attribute = attribute.split(" ")[0]
                            # Remove "Type" (i.e. turn "Tissue Type" to
                            # "Tissue")
                            if "Type" in value[0]:
                                value[0] = value[0].split(" ")[0]
                            conditional_statement = f'{attribute} is "{value[0]}"'
                            if conditional_statement not in data_dict[key]['Conditional Requirements']:
                                data_dict[key]['Cond_Req'] = True
                                data_dict[key]['Conditional Requirements'].extend([conditional_statement])
                    # Chain the original cause instead of a bare `except:`;
                    # the message is now a real f-string (previously the
                    # `{key}` placeholder was in a non-f-string fragment and
                    # was emitted literally).
                    except Exception as exc:
                        raise ValueError(
                            f"There is an error getting conditional requirements related "
                            f"to the attribute: {key}. The error is likely caused by naming inconsistencies (e.g. uppercase, camelcase, ...)"
                        ) from exc

            for key, value in data_dict.items():
                if 'Conditional Requirements' in value.keys():
                    ## reformat conditional requirement

                    # get all attributes
                    attr_lst = [i.split(" is ")[-1] for i in data_dict[key]['Conditional Requirements']]

                    # join a list of attributes by using OR
                    attr_str = " OR ".join(attr_lst)

                    # reformat the conditional requirement
                    component_name = data_dict[key]['Conditional Requirements'][0].split(' is ')[0]
                    conditional_statement_str = f' If {component_name} is {attr_str} then "{key}" is required'
                    data_dict[key]['Conditional Requirements'] = conditional_statement_str

            df = pd.DataFrame(data_dict)
            df = df.T
            cols = ['Attribute', 'Label', 'Description', 'Required', 'Cond_Req',
                    'Valid Values', 'Conditional Requirements',
                    'Validation Rules', 'Component']
            cols = [col for col in cols if col in df.columns]
            df = df[cols]
            df = self.convert_string_cols_to_json(df, ['Valid Values'])
            df_store.append(df)

        merged_attributes_df = pd.concat(df_store, join='outer')
        cols = ['Attribute', 'Label', 'Description', 'Required', 'Cond_Req',
                'Valid Values', 'Conditional Requirements',
                'Validation Rules', 'Component']
        cols = [col for col in cols if col in merged_attributes_df.columns]
        merged_attributes_df = merged_attributes_df[cols]

        if save_file:
            # to_csv with a path returns None; callers treat this as
            # "saved locally".
            return merged_attributes_df.to_csv(
                os.path.join(self.output_path,
                             self.schema_name + 'attributes_data.vis_data.csv'),
                index=include_index)
        return merged_attributes_df.to_csv(index=include_index)
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/visualization/attributes_explorer.py
0.735167
0.214568
attributes_explorer.py
pypi
import re
from dataclasses import field

from pydantic.dataclasses import dataclass
from pydantic import validator, ConfigDict, Extra

# This turns on validation for value assignments after creation
pydantic_config = ConfigDict(validate_assignment=True, extra=Extra.forbid)


@dataclass(config=pydantic_config)
class SynapseConfig:
    """Synapse asset-store settings.

    config: Path to the synapse config file, either absolute or relative to this file
    manifest_basename: the name of downloaded manifest files
    master_fileview_id: Synapse ID of the file view listing all project data assets.
    """

    config: str = ".synapseConfig"
    manifest_basename: str = "synapse_storage_manifest"
    master_fileview_id: str = "syn23643253"

    @validator("master_fileview_id")
    @classmethod
    def validate_synapse_id(cls, value: str) -> str:
        """Check if string is a valid synapse id

        Args:
            value (str): A string

        Raises:
            ValueError: If the value isn't a valid Synapse id

        Returns:
            (str): The input value
        """
        # NOTE(review): pattern is anchored only at the start, so trailing
        # characters after the digits are accepted — confirm intended.
        if not re.search("^syn[0-9]+", value):
            raise ValueError(f"{value} is not a valid Synapse id")
        return value

    @validator("config", "manifest_basename")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty(has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if not value:
            raise ValueError(f"{value} is an empty string")
        return value


@dataclass(config=pydantic_config)
class ManifestConfig:
    """Manifest-generation settings.

    manifest_folder: name of the folder manifests will be saved to locally
    title: Title or title prefix given to generated manifest(s)
    data_type: Data types of manifests to be generated or data type (singular)
        to validate manifest against
    """

    manifest_folder: str = "manifests"
    title: str = "example"
    data_type: list[str] = field(default_factory=lambda: ["Biospecimen", "Patient"])

    @validator("title", "manifest_folder")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty(has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if not value:
            raise ValueError(f"{value} is an empty string")
        return value


@dataclass(config=pydantic_config)
class ModelConfig:
    """Data-model settings.

    location: location of the schema jsonld
    """

    location: str = "tests/data/example.model.jsonld"

    @validator("location")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty(has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if not value:
            raise ValueError(f"{value} is an empty string")
        return value


@dataclass(config=pydantic_config)
class GoogleSheetsConfig:
    """Google Sheets settings.

    master_template_id: The template id of the google sheet.
    strict_validation: When doing google sheet validation (regex match) with
        the validation rules. True is alerting the user and not allowing entry
        of bad values. False is warning but allowing the entry on to the sheet.
    service_acct_creds_synapse_id: The Synapse id of the Google service
        account credentials.
    service_acct_creds: Path to the Google service account credentials,
        either absolute or relative to this file
    """

    service_acct_creds_synapse_id: str = "syn25171627"
    service_acct_creds: str = "schematic_service_account_creds.json"
    strict_validation: bool = True

    @validator("service_acct_creds")
    @classmethod
    def validate_string_is_not_empty(cls, value: str) -> str:
        """Check if string is not empty(has at least one char)

        Args:
            value (str): A string

        Raises:
            ValueError: If the value is zero characters long

        Returns:
            (str): The input value
        """
        if not value:
            raise ValueError(f"{value} is an empty string")
        return value

    @validator("service_acct_creds_synapse_id")
    @classmethod
    def validate_synapse_id(cls, value: str) -> str:
        """Check if string is a valid synapse id

        Args:
            value (str): A string

        Raises:
            ValueError: If the value isn't a valid Synapse id

        Returns:
            (str): The input value
        """
        if not re.search("^syn[0-9]+", value):
            raise ValueError(f"{value} is not a valid Synapse id")
        return value
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/configuration/dataclasses.py
0.85738
0.400017
dataclasses.py
pypi
from typing import Optional, Any
import os
import yaml

from schematic.utils.general import normalize_path

from .dataclasses import (
    SynapseConfig,
    ManifestConfig,
    ModelConfig,
    GoogleSheetsConfig,
)


class ConfigNonAllowedFieldError(Exception):
    """Raised when a user submitted config file contains non allowed fields"""

    def __init__(
        self, message: str, fields: list[str], allowed_fields: list[str]
    ) -> None:
        """
        Args:
            message (str): A message describing the error
            fields (list[str]): The fields in the config
            allowed_fields (list[str]): The allowed fields in the config
        """
        self.message = message
        self.fields = fields
        self.allowed_fields = allowed_fields
        super().__init__(self.message)

    def __str__(self) -> str:
        """String representation"""
        return (
            f"{self.message}; "
            f"config contains fields: {self.fields}; "
            f"allowed fields: {self.allowed_fields}"
        )


class Configuration:
    """
    This class is used as a singleton by the rest of the package.
    It is instantiated only once at the bottom of this file, and that
    instance is imported by other modules
    """

    def __init__(self) -> None:
        self.config_path: Optional[str] = None
        self._parent_directory = os.getcwd()
        self._synapse_config = SynapseConfig()
        self._manifest_config = ManifestConfig()
        self._model_config = ModelConfig()
        self._google_sheets_config = GoogleSheetsConfig()

    def load_config(self, config_path: str) -> None:
        """Loads a user created config file and overwrites any defaults
        listed in the file

        Args:
            config_path (str): The path to the config file

        Raises:
            ConfigNonAllowedFieldError: If there are non allowed fields in
                the config file
        """
        allowed_config_fields = {"asset_store", "manifest", "model", "google_sheets"}
        config_path = os.path.expanduser(config_path)
        config_path = os.path.abspath(config_path)
        self.config_path = config_path

        # Relative paths inside the config are resolved against the config
        # file's own directory.
        self._parent_directory = os.path.dirname(config_path)

        with open(config_path, "r", encoding="utf-8") as file:
            config: dict[str, Any] = yaml.safe_load(file)
        if not set(config.keys()).issubset(allowed_config_fields):
            raise ConfigNonAllowedFieldError(
                "Non allowed fields in top level of configuration file.",
                list(config.keys()),
                list(allowed_config_fields),
            )

        self._manifest_config = ManifestConfig(**config.get("manifest", {}))
        self._model_config = ModelConfig(**config.get("model", {}))
        self._google_sheets_config = GoogleSheetsConfig(
            **config.get("google_sheets", {})
        )
        self._set_asset_store(config.get("asset_store", {}))

    def _set_asset_store(self, config: dict[str, Any]) -> None:
        """Overwrites the default Synapse config from the asset_store section.

        Args:
            config (dict[str, Any]): The asset_store section of the config
                file (may be empty).

        Raises:
            ConfigNonAllowedFieldError: If there are non allowed fields in
                the asset_store section
        """
        allowed_config_fields = {"synapse"}
        if not config:
            # Nothing to override; keep the default SynapseConfig.
            # (Previously this branch was `pass`, so an absent/empty
            # asset_store section fell through to config["synapse"] and
            # raised KeyError.)
            return
        if not set(config.keys()).issubset(allowed_config_fields):
            raise ConfigNonAllowedFieldError(
                "Non allowed fields in asset_store of configuration file.",
                list(config.keys()),
                list(allowed_config_fields),
            )
        self._synapse_config = SynapseConfig(**config["synapse"])

    @property
    def synapse_configuration_path(self) -> str:
        """
        Returns:
            str: The path to the synapse configuration file
        """
        return normalize_path(self._synapse_config.config, self._parent_directory)

    @property
    def synapse_manifest_basename(self) -> str:
        """
        Returns:
            str: The name of downloaded manifest files
        """
        return self._synapse_config.manifest_basename

    @property
    def synapse_master_fileview_id(self) -> str:
        """
        Returns:
            str: The Synapse ID of the master file view
        """
        return self._synapse_config.master_fileview_id

    @synapse_master_fileview_id.setter
    def synapse_master_fileview_id(self, synapse_id: str) -> None:
        """Sets the Synapse master fileview ID

        Args:
            synapse_id (str): The synapse id to set
        """
        self._synapse_config.master_fileview_id = synapse_id

    @property
    def manifest_folder(self) -> str:
        """
        Returns:
            str: Location where manifests will saved to
        """
        return self._manifest_config.manifest_folder

    @property
    def manifest_title(self) -> str:
        """
        Returns:
            str: Title or title prefix given to generated manifest(s)
        """
        return self._manifest_config.title

    @property
    def manifest_data_type(self) -> list[str]:
        """
        Returns:
            list[str]: Data types of manifests to be generated or data type
                (singular) to validate manifest against
        """
        return self._manifest_config.data_type

    @property
    def model_location(self) -> str:
        """
        Returns:
            str: The path to the model.jsonld
        """
        return self._model_config.location

    @property
    def service_account_credentials_synapse_id(self) -> str:
        """
        Returns:
            str: The Synapse id of the Google service account credentials.
        """
        return self._google_sheets_config.service_acct_creds_synapse_id

    @property
    def service_account_credentials_path(self) -> str:
        """
        Returns:
            str: The path of the Google service account credentials.
        """
        return normalize_path(
            self._google_sheets_config.service_acct_creds, self._parent_directory
        )

    @property
    def google_sheets_master_template_id(self) -> str:
        """
        Returns:
            str: The template id of the google sheet.

        Note: this is intentionally a hard-coded constant, not read from
        GoogleSheetsConfig (which has no such field).
        """
        return "1LYS5qE4nV9jzcYw5sXwCza25slDfRA1CIg3cs-hCdpU"

    @property
    def google_sheets_strict_validation(self) -> bool:
        """
        Returns:
            bool: Whether or not to disallow bad values in the google sheet
        """
        return self._google_sheets_config.strict_validation

    @property
    def google_required_background_color(self) -> dict[str, float]:
        """
        Returns:
            dict[str, float]: Background color for google sheet
        """
        return {
            "red": 0.9215,
            "green": 0.9725,
            "blue": 0.9803,
        }

    @property
    def google_optional_background_color(self) -> dict[str, float]:
        """
        Returns:
            dict[str, float]: Background color for google sheet
        """
        return {
            "red": 1.0,
            "green": 1.0,
            "blue": 0.9019,
        }


# This instantiates the singleton for the rest of the package
CONFIG = Configuration()
/schematicpy-23.8.1.tar.gz/schematicpy-23.8.1/schematic/configuration/configuration.py
0.909914
0.174199
configuration.py
pypi
2.1.0 / Unreleased ================== **[BREAKING CHANGE]** - Drop Python 2.6 support `#517 <https://github.com/schematics/schematics/pull/517>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) Other changes: - Add TimedeltaType `#540 <https://github.com/schematics/schematics/pull/540>`__ (`gabisurita <https://github.com/gabisurita>`__) - Allow to create Model fields dynamically `#512 <https://github.com/schematics/schematics/pull/512>`__ (`lkraider <https://github.com/lkraider>`__) - Allow ModelOptions to have extra parameters `#449 <https://github.com/schematics/schematics/pull/449>`__ (`rmb938 <https://github.com/rmb938>`__) `#506 <https://github.com/schematics/schematics/pull/506>`__ (`ekampf <https://github.com/ekampf>`__) - Accept callables as serialize roles `#508 <https://github.com/schematics/schematics/pull/508>`__ (`lkraider <https://github.com/lkraider>`__) (`jaysonsantos <https://github.com/jaysonsantos>`__) - Simplify PolyModelType.find_model for readability `#537 <https://github.com/schematics/schematics/pull/537>`__ (`kstrauser <https://github.com/kstrauser>`__) - Enable PolyModelType recursive validation `#535 <https://github.com/schematics/schematics/pull/535>`__ (`javiertejero <https://github.com/javiertejero>`__) - Documentation fixes `#509 <https://github.com/schematics/schematics/pull/509>`__ (`Tuoris <https://github.com/Tuoris>`__) `#514 <https://github.com/schematics/schematics/pull/514>`__ (`tommyzli <https://github.com/tommyzli>`__) `#518 <https://github.com/schematics/schematics/pull/518>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#546 <https://github.com/schematics/schematics/pull/546>`__ (`harveyslash <https://github.com/harveyslash>`__) - Fix Model.init validation when partial is True `#531 <https://github.com/schematics/schematics/issues/531>`__ (`lkraider <https://github.com/lkraider>`__) - Minor number types refactor and mocking fixes `#519 <https://github.com/schematics/schematics/pull/519>`__ 
(`rooterkyberian <https://github.com/rooterkyberian>`__) `#520 <https://github.com/schematics/schematics/pull/520>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) - Add ability to import models as strings `#496 <https://github.com/schematics/schematics/pull/496>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) - Add EnumType `#504 <https://github.com/schematics/schematics/pull/504>`__ (`ekamil <https://github.com/ekamil>`__) - Dynamic models: Possible memory issues because of _subclasses `#502 <https://github.com/schematics/schematics/pull/502>`__ (`mjrk <https://github.com/mjrk>`__) - Add type hints to constructors of field type classes `#488 <https://github.com/schematics/schematics/pull/488>`__ (`KonishchevDmitry <https://github.com/KonishchevDmitry>`__) - Regression: Do not call field validator if field has not been set `#499 <https://github.com/schematics/schematics/pull/499>`__ (`cmonfort <https://github.com/cmonfort>`__) - Add possibility to translate strings and add initial pt_BR translations `#495 <https://github.com/schematics/schematics/pull/495>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.1 / 2017-05-30 ================== - Support for raising DataError inside custom validate_fieldname methods. `#441 <https://github.com/schematics/schematics/pull/441>`__ (`alexhayes <https://github.com/alexhayes>`__) - Add specialized SchematicsDeprecationWarning. (`lkraider <https://github.com/lkraider>`__) - DateTimeType to_native method should handle type errors gracefully. `#491 <https://github.com/schematics/schematics/pull/491>`__ (`e271828- <https://github.com/e271828->`__) - Allow fields names to override the mapping-interface methods. 
`#489 <https://github.com/schematics/schematics/pull/489>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.0 / 2017-05-22 ================== **[BREAKING CHANGE]** Version 2.0 introduces many API changes, and it is not fully backwards-compatible with 1.x code. `Full Changelog <https://github.com/schematics/schematics/compare/v1.1.2...v2.0.0>`_ - Add syntax highlighting to README examples `#486 <https://github.com/schematics/schematics/pull/486>`__ (`gabisurita <https://github.com/gabisurita>`__) - Encode Unsafe data state in Model `#484 <https://github.com/schematics/schematics/pull/484>`__ (`lkraider <https://github.com/lkraider>`__) - Add MACAddressType `#482 <https://github.com/schematics/schematics/pull/482>`__ (`aleksej-paschenko <https://github.com/aleksej-paschenko>`__) 2.0.0.b1 / 2017-04-06 ===================== - Enhancing and addressing some issues around exceptions: `#477 <https://github.com/schematics/schematics/pull/477>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) - Allow primitive and native types to be inspected `#431 <https://github.com/schematics/schematics/pull/431>`__ (`chadrik <https://github.com/chadrik>`__) - Atoms iterator performance improvement `#476 <https://github.com/schematics/schematics/pull/476>`__ (`vovanbo <https://github.com/vovanbo>`__) - Fixes 453: Recursive import\_loop with ListType `#475 <https://github.com/schematics/schematics/pull/475>`__ (`lkraider <https://github.com/lkraider>`__) - Schema API `#466 <https://github.com/schematics/schematics/pull/466>`__ (`lkraider <https://github.com/lkraider>`__) - Tweak code example to avoid sql injection `#462 <https://github.com/schematics/schematics/pull/462>`__ (`Ian-Foote <https://github.com/Ian-Foote>`__) - Convert readthedocs links for their .org -> .io migration for hosted projects `#454 <https://github.com/schematics/schematics/pull/454>`__ (`adamchainz <https://github.com/adamchainz>`__) - Support all 
non-string Iterables as choices (dev branch) `#436 <https://github.com/schematics/schematics/pull/436>`__ (`di <https://github.com/di>`__) - When testing if a values is None or Undefined, use 'is'. `#425 <https://github.com/schematics/schematics/pull/425>`__ (`chadrik <https://github.com/chadrik>`__) 2.0.0a1 / 2016-05-03 ==================== - Restore v1 to\_native behavior; simplify converter code `#412 <https://github.com/schematics/schematics/pull/412>`__ (`bintoro <https://github.com/bintoro>`__) - Change conversion rules for booleans `#407 <https://github.com/schematics/schematics/pull/407>`__ (`bintoro <https://github.com/bintoro>`__) - Test for Model.\_\_init\_\_ context passing to types `#399 <https://github.com/schematics/schematics/pull/399>`__ (`sheilatron <https://github.com/sheilatron>`__) - Code normalization for Python 3 + general cleanup `#391 <https://github.com/schematics/schematics/pull/391>`__ (`bintoro <https://github.com/bintoro>`__) - Add support for arbitrary field metadata. 
`#390 <https://github.com/schematics/schematics/pull/390>`__ (`chadrik <https://github.com/chadrik>`__) - Introduce MixedType `#380 <https://github.com/schematics/schematics/pull/380>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev2 / 2016-02-06 ======================= - Type maintenance `#383 <https://github.com/schematics/schematics/pull/383>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev1 / 2016-02-01 ======================= - Performance optimizations `#378 <https://github.com/schematics/schematics/pull/378>`__ (`bintoro <https://github.com/bintoro>`__) - Validation refactoring + exception redesign `#374 <https://github.com/schematics/schematics/pull/374>`__ (`bintoro <https://github.com/bintoro>`__) - Fix typo: serilaizataion --> serialization `#373 <https://github.com/schematics/schematics/pull/373>`__ (`jeffwidman <https://github.com/jeffwidman>`__) - Add support for undefined values `#372 <https://github.com/schematics/schematics/pull/372>`__ (`bintoro <https://github.com/bintoro>`__) - Serializable improvements `#371 <https://github.com/schematics/schematics/pull/371>`__ (`bintoro <https://github.com/bintoro>`__) - Unify import/export interface across all types `#368 <https://github.com/schematics/schematics/pull/368>`__ (`bintoro <https://github.com/bintoro>`__) - Correctly decode bytestrings in Python 3 `#365 <https://github.com/schematics/schematics/pull/365>`__ (`bintoro <https://github.com/bintoro>`__) - Fix NumberType.to\_native() `#364 <https://github.com/schematics/schematics/pull/364>`__ (`bintoro <https://github.com/bintoro>`__) - Make sure field.validate() uses a native type `#363 <https://github.com/schematics/schematics/pull/363>`__ (`bintoro <https://github.com/bintoro>`__) - Don't validate ListType items twice `#362 <https://github.com/schematics/schematics/pull/362>`__ (`bintoro <https://github.com/bintoro>`__) - Collect field validators as bound methods `#361 <https://github.com/schematics/schematics/pull/361>`__ (`bintoro 
<https://github.com/bintoro>`__) - Propagate environment during recursive import/export/validation `#359 <https://github.com/schematics/schematics/pull/359>`__ (`bintoro <https://github.com/bintoro>`__) - DateTimeType & TimestampType major rewrite `#358 <https://github.com/schematics/schematics/pull/358>`__ (`bintoro <https://github.com/bintoro>`__) - Always export empty compound objects as {} / [] `#351 <https://github.com/schematics/schematics/pull/351>`__ (`bintoro <https://github.com/bintoro>`__) - export\_loop cleanup `#350 <https://github.com/schematics/schematics/pull/350>`__ (`bintoro <https://github.com/bintoro>`__) - Fix FieldDescriptor.\_\_delete\_\_ to not touch model `#349 <https://github.com/schematics/schematics/pull/349>`__ (`bintoro <https://github.com/bintoro>`__) - Add validation method for latitude and longitude ranges in GeoPointType `#347 <https://github.com/schematics/schematics/pull/347>`__ (`wraziens <https://github.com/wraziens>`__) - Fix longitude values for GeoPointType mock and add tests `#344 <https://github.com/schematics/schematics/pull/344>`__ (`wraziens <https://github.com/wraziens>`__) - Add support for self-referential ModelType fields `#335 <https://github.com/schematics/schematics/pull/335>`__ (`bintoro <https://github.com/bintoro>`__) - avoid unnecessary code path through try/except `#327 <https://github.com/schematics/schematics/pull/327>`__ (`scavpy <https://github.com/scavpy>`__) - Get mock object for ModelType and ListType `#306 <https://github.com/schematics/schematics/pull/306>`__ (`kaiix <https://github.com/kaiix>`__) 1.1.3 / 2017-06-27 ================== * [Maintenance] (`#501 <https://github.com/schematics/schematics/issues/501>`_) Dynamic models: Possible memory issues because of _subclasses 1.1.2 / 2017-03-27 ================== * [Bug] (`#478 <https://github.com/schematics/schematics/pull/478>`_) Fix dangerous performance issue with ModelConversionError in nested models 1.1.1 / 2015-11-03 ================== * [Bug] 
(`befa202 <https://github.com/schematics/schematics/commit/befa202c3b3202aca89fb7ef985bdca06f9da37c>`_) Fix Unicode issue with DecimalType * [Documentation] (`41157a1 <https://github.com/schematics/schematics/commit/41157a13896bd32a337c5503c04c5e9cc30ba4c7>`_) Documentation overhaul * [Bug] (`860d717 <https://github.com/schematics/schematics/commit/860d71778421981f284c0612aec665ebf0cfcba2>`_) Fix import that was negatively affecting performance * [Feature] (`93b554f <https://github.com/schematics/schematics/commit/93b554fd6a4e7b38133c4da5592b1843101792f0>`_) Add DataObject to datastructures.py * [Bug] (`#236 <https://github.com/schematics/schematics/pull/236>`_) Set `None` on a field that's a compound type should honour that semantics * [Maintenance] (`#348 <https://github.com/schematics/schematics/pull/348>`_) Update requirements * [Maintenance] (`#346 <https://github.com/schematics/schematics/pull/346>`_) Combining Requirements * [Maintenance] (`#342 <https://github.com/schematics/schematics/pull/342>`_) Remove to_primitive() method from compound types * [Bug] (`#339 <https://github.com/schematics/schematics/pull/339>`_) Basic number validation * [Bug] (`#336 <https://github.com/schematics/schematics/pull/336>`_) Don't evaluate serializable when accessed through class * [Bug] (`#321 <https://github.com/schematics/schematics/pull/321>`_) Do not compile regex * [Maintenance] (`#319 <https://github.com/schematics/schematics/pull/319>`_) Remove mock from install_requires 1.1.0 / 2015-07-12 ================== * [Feature] (`#303 <https://github.com/schematics/schematics/pull/303>`_) fix ListType, validate_items adds to errors list just field name without... 
* [Feature] (`#304 <https://github.com/schematics/schematics/pull/304>`_) Include Partial Data when Raising ModelConversionError * [Feature] (`#305 <https://github.com/schematics/schematics/pull/305>`_) Updated domain verifications to fit to RFC/working standards * [Feature] (`#308 <https://github.com/schematics/schematics/pull/308>`_) Grennady ordered validation * [Feature] (`#309 <https://github.com/schematics/schematics/pull/309>`_) improves date_time_type error message for custom formats * [Feature] (`#310 <https://github.com/schematics/schematics/pull/310>`_) accept optional 'Z' suffix for UTC date_time_type format * [Feature] (`#311 <https://github.com/schematics/schematics/pull/311>`_) Remove commented lines from models.py * [Feature] (`#230 <https://github.com/schematics/schematics/pull/230>`_) Message normalization 1.0.4 / 2015-04-13 ================== * [Example] (`#286 <https://github.com/schematics/schematics/pull/286>`_) Add schematics usage with Django * [Feature] (`#292 <https://github.com/schematics/schematics/pull/292>`_) increase domain length to 10 for .holiday, .vacations * [Feature] (`#297 <https://github.com/schematics/schematics/pull/297>`_) Support for fields order in serialized format * [Feature] (`#300 <https://github.com/schematics/schematics/pull/300>`_) increase domain length to 32 1.0.3 / 2015-03-07 ================== * [Feature] (`#284 <https://github.com/schematics/schematics/pull/284>`_) Add missing requirement for `six` * [Feature] (`#283 <https://github.com/schematics/schematics/pull/283>`_) Update error msgs to print out invalid values in base.py * [Feature] (`#281 <https://github.com/schematics/schematics/pull/281>`_) Update Model.__eq__ * [Feature] (`#267 <https://github.com/schematics/schematics/pull/267>`_) Type choices should be list or tuple 1.0.2 / 2015-02-12 ================== * [Bug] (`#280 <https://github.com/schematics/schematics/issues/280>`_) Fix the circular import issue. 
1.0.1 / 2015-02-01 ================== * [Feature] (`#184 <https://github.com/schematics/schematics/issues/184>`_ / `03b2fd9 <https://github.com/schematics/schematics/commit/03b2fd97fb47c00e8d667cc8ea7254cc64d0f0a0>`_) Support for polymorphic model fields * [Bug] (`#233 <https://github.com/schematics/schematics/pull/233>`_) Set field.owner_model recursively and honor ListType.field.serialize_when_none * [Bug](`#252 <https://github.com/schematics/schematics/pull/252>`_) Fixed project URL * [Feature] (`#259 <https://github.com/schematics/schematics/pull/259>`_) Give export loop to serializable when type has one * [Feature] (`#262 <https://github.com/schematics/schematics/pull/262>`_) Make copies of inherited meta attributes when setting up a Model * [Documentation] (`#276 <https://github.com/schematics/schematics/pull/276>`_) Improve the documentation of get_mock_object 1.0.0 / 2014-10-16 ================== * [Documentation] (`#239 <https://github.com/schematics/schematics/issues/239>`_) Fix typo with wording suggestion * [Documentation] (`#244 <https://github.com/schematics/schematics/issues/244>`_) fix wrong reference in docs * [Documentation] (`#246 <https://github.com/schematics/schematics/issues/246>`_) Using the correct function name in the docstring * [Documentation] (`#245 <https://github.com/schematics/schematics/issues/245>`_) Making the docstring match actual parameter names * [Feature] (`#241 <https://github.com/schematics/schematics/issues/241>`_) Py3k support 0.9.5 / 2014-07-19 ================== * [Feature] (`#191 <https://github.com/schematics/schematics/pull/191>`_) Updated import_data to avoid overwriting existing data. deserialize_mapping can now support partial and nested models. 
* [Documentation] (`#192 <https://github.com/schematics/schematics/pull/192>`_) Document the creation of custom types * [Feature] (`#193 <https://github.com/schematics/schematics/pull/193>`_) Add primitive types accepting values of any simple or compound primitive JSON type. * [Bug] (`#194 <https://github.com/schematics/schematics/pull/194>`_) Change standard coerce_key function to unicode * [Tests] (`#196 <https://github.com/schematics/schematics/pull/196>`_) Test fixes and cleanup * [Feature] (`#197 <https://github.com/schematics/schematics/pull/197>`_) Giving context to serialization * [Bug] (`#198 <https://github.com/schematics/schematics/pull/198>`_) Fixed typo in variable name in DateTimeType * [Feature] (`#200 <https://github.com/schematics/schematics/pull/200>`_) Added the option to turn off strict conversion when creating a Model from a dict * [Feature] (`#212 <https://github.com/schematics/schematics/pull/212>`_) Support exporting ModelType fields with subclassed model instances * [Feature] (`#214 <https://github.com/schematics/schematics/pull/214>`_) Create mock objects using a class's fields as a template * [Bug] (`#215 <https://github.com/schematics/schematics/pull/215>`_) PEP 8 FTW * [Feature] (`#216 <https://github.com/schematics/schematics/pull/216>`_) Datastructures cleanup * [Feature] (`#217 <https://github.com/schematics/schematics/pull/217>`_) Models cleanup pt 1 * [Feature] (`#218 <https://github.com/schematics/schematics/pull/218>`_) Models cleanup pt 2 * [Feature] (`#219 <https://github.com/schematics/schematics/pull/219>`_) Mongo cleanup * [Feature] (`#220 <https://github.com/schematics/schematics/pull/220>`_) Temporal cleanup * [Feature] (`#221 <https://github.com/schematics/schematics/pull/221>`_) Base cleanup * [Feature] (`#224 <https://github.com/schematics/schematics/pull/224>`_) Exceptions cleanup * [Feature] (`#225 <https://github.com/schematics/schematics/pull/225>`_) Validate cleanup * [Feature] (`#226 
<https://github.com/schematics/schematics/pull/226>`_) Serializable cleanup * [Feature] (`#227 <https://github.com/schematics/schematics/pull/227>`_) Transforms cleanup * [Feature] (`#228 <https://github.com/schematics/schematics/pull/228>`_) Compound cleanup * [Feature] (`#229 <https://github.com/schematics/schematics/pull/229>`_) UUID cleanup * [Feature] (`#231 <https://github.com/schematics/schematics/pull/231>`_) Booleans as numbers 0.9.4 / 2013-12-08 ================== * [Feature] (`#178 <https://github.com/schematics/schematics/pull/178>`_) Added deserialize_from flag to BaseType for alternate field names on import * [Bug] (`#186 <https://github.com/schematics/schematics/pull/186>`_) Compoundtype support in ListTypes * [Bug] (`#181 <https://github.com/schematics/schematics/pull/181>`_) Removed that stupid print statement! * [Feature] (`#182 <https://github.com/schematics/schematics/pull/182>`_) Default roles system * [Documentation] (`#190 <https://github.com/schematics/schematics/pull/190>`_) Typos * [Bug] (`#177 <https://github.com/schematics/schematics/pull/177>`_) Removed `__iter__` from ModelMeta * [Documentation] (`#188 <https://github.com/schematics/schematics/pull/188>`_) Typos 0.9.3 / 2013-10-20 ================== * [Documentation] More improvements * [Feature] (`#147 <https://github.com/schematics/schematics/pull/147>`_) Complete conversion over to py.test * [Bug] (`#176 <https://github.com/schematics/schematics/pull/176>`_) Fixed bug preventing clean override of options class * [Bug] (`#174 <https://github.com/schematics/schematics/pull/174>`_) Python 2.6 support 0.9.2 / 2013-09-13 ================== * [Documentation] New History file! * [Documentation] Major improvements to documentation * [Feature] Renamed ``check_value`` to ``validate_range`` * [Feature] Changed ``serialize`` to ``to_native`` * [Bug] (`#155 <https://github.com/schematics/schematics/pull/155>`_) NumberType number range validation bugfix
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/HISTORY.rst
0.816626
0.734429
HISTORY.rst
pypi
from __future__ import absolute_import

import functools
import operator
import sys

__all__ = ['PY2', 'PY3', 'string_type', 'iteritems', 'metaclass',
           'py_native_string', 'reraise', 'str_compat']

# Interpreter major-version flags used throughout the package.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY2:
    __all__ += ['bytes', 'str', 'map', 'zip', 'range']
    bytes = str
    str = unicode
    string_type = basestring
    range = xrange
    from itertools import imap as map
    from itertools import izip as zip
    iteritems = operator.methodcaller('iteritems')
    itervalues = operator.methodcaller('itervalues')
    # reraise code taken from werzeug BSD license at
    # https://github.com/pallets/werkzeug/blob/master/LICENSE
    # The three-argument ``raise`` form is a syntax error on PY3, so it is
    # hidden inside exec().
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
else:
    string_type = str
    iteritems = operator.methodcaller('items')
    itervalues = operator.methodcaller('values')

    # reraise code taken from werzeug BSD license at
    # https://github.com/pallets/werkzeug/blob/master/LICENSE
    def reraise(tp, value, tb=None):
        """Re-raise ``value``, attaching traceback ``tb`` when it differs."""
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value


def metaclass(metaclass):
    """Class decorator that rebuilds the decorated class with ``metaclass``.

    Works on both Python 2 and 3, unlike the ``metaclass=``/``__metaclass__``
    syntaxes which are mutually incompatible.
    """
    def make_class(cls):
        attrs = cls.__dict__.copy()
        # Drop slots that a fresh class creation will regenerate.
        if attrs.get('__dict__'):
            del attrs['__dict__']
            del attrs['__weakref__']
        return metaclass(cls.__name__, cls.__bases__, attrs)
    return make_class


def py_native_string(source):
    """
    Converts Unicode strings to bytestrings on Python 2. The intended usage is to
    wrap a function or a string in cases where Python 2 expects a native string.

    On Python 3 the input is returned unchanged.
    """
    if PY2:
        if isinstance(source, str):
            return source.encode('ascii')
        elif callable(source):
            @functools.wraps(source)
            def new_func(*args, **kwargs):
                rv = source(*args, **kwargs)
                if isinstance(rv, str):
                    rv = rv.encode('unicode-escape')
                return rv
            return new_func
    return source


def str_compat(class_):
    """
    On Python 2, patches the ``__str__`` and ``__repr__`` methods on the given
    class so that the class can be written for Python 3 and Unicode.
    """
    if PY2:
        if '__str__' in class_.__dict__ and '__unicode__' not in class_.__dict__:
            class_.__unicode__ = class_.__str__
            class_.__str__ = py_native_string(class_.__unicode__)
    return class_


def repr_compat(class_):
    """On Python 2, makes ``__repr__`` return a native (byte) string."""
    if PY2:
        if '__repr__' in class_.__dict__:
            class_.__repr__ = py_native_string(class_.__repr__)
    return class_


def _dict(mapping):
    """Materialize any mapping-like object into a plain ``dict``."""
    return {key: mapping[key] for key in mapping}
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/compat.py
0.561816
0.163813
compat.py
pypi
from __future__ import unicode_literals, absolute_import

import inspect
import functools

from .common import *
from .datastructures import Context
from .exceptions import FieldError, DataError
from .transforms import import_loop, validation_converter
from .undefined import Undefined
from .iteration import atoms

__all__ = []


def validate(schema, mutable, raw_data=None, trusted_data=None, partial=False, strict=False,
             convert=True, context=None, **kwargs):
    """
    Validate some untrusted data using a model. Trusted data can be passed in
    the `trusted_data` parameter.

    :param schema:
        The Schema to use as source for validation.
    :param mutable:
        A mapping or instance that can be changed during validation by Schema
        functions.
    :param raw_data:
        A mapping or instance containing new data to be validated.
    :param partial:
        Allow partial data to validate; useful for PATCH requests.
        Essentially drops the ``required=True`` arguments from field
        definitions. Default: False
    :param strict:
        Complain about unrecognized keys. Default: False
    :param trusted_data:
        A ``dict``-like structure that may contain already validated data.
    :param convert:
        Controls whether to perform import conversion before validating.
        Can be turned off to skip an unnecessary conversion step if all values
        are known to have the right datatypes (e.g., when validating immediately
        after the initial import). Default: True
    :returns: data
        ``dict`` containing the valid raw_data plus ``trusted_data``.
        If errors are found, they are raised as a ValidationError with a list
        of errors attached.
    """
    if raw_data is None:
        raw_data = mutable

    context = context or get_validation_context(partial=partial, strict=strict,
                                                convert=convert)

    errors = {}
    try:
        data = import_loop(schema, mutable, raw_data, trusted_data=trusted_data,
                           context=context, **kwargs)
    except DataError as exc:
        # Keep the conversion errors and continue validating what converted.
        errors = dict(exc.errors)
        data = exc.partial_data

    errors.update(_validate_model(schema, mutable, data, context))

    if errors:
        raise DataError(errors, data)

    return data


def _validate_model(schema, mutable, data, context):
    """
    Validate data using model level methods.

    :param schema:
        The Schema to validate ``data`` against.
    :param mutable:
        A mapping or instance that will be passed to the validator containing
        the original data and that can be mutated.
    :param data:
        A dict with data to validate. Invalid items are removed from it.
    :returns:
        Errors of the fields that did not pass validation.
    """
    errors = {}
    invalid_fields = []

    has_validator = lambda atom: (
        atom.value is not Undefined and
        atom.name in schema._validator_functions
    )

    for field_name, field, value in atoms(schema, data, filter=has_validator):
        try:
            schema._validator_functions[field_name](mutable, data, value, context)
        except (FieldError, DataError) as exc:
            serialized_field_name = field.serialized_name or field_name
            errors[serialized_field_name] = exc.errors
            invalid_fields.append(field_name)

    # Remove the fields that failed, so callers only see valid data.
    for field_name in invalid_fields:
        data.pop(field_name)

    return errors


def get_validation_context(**options):
    """Build a ``Context`` pre-populated with validation defaults.

    Any keyword option overrides the corresponding default below.
    """
    validation_options = {
        'field_converter': validation_converter,
        'partial': False,
        'strict': False,
        'convert': True,
        'validate': True,
        'new': False,
    }
    validation_options.update(options)
    return Context(**validation_options)


# Sentinel distinguishing "caller did not pass a context kwarg" from any real
# value a caller might pass (including falsy ones such as 0 or None).
_CONTEXT_MISSING = object()


def prepare_validator(func, argcount):
    """Adapt a validator callable to the internal calling convention.

    Validators are invoked with ``argcount`` positional arguments, the last of
    which is the validation context. A validator declared with fewer
    parameters is wrapped so the trailing context argument is stripped before
    the call.

    :param func:
        The validator; a plain function or a ``classmethod`` object.
    :param argcount:
        Number of positional arguments the framework will supply.
    :returns:
        ``func`` itself, or a wrapper with the adapted signature.
    """
    if isinstance(func, classmethod):
        func = func.__get__(object).__func__
    try:
        func_args = inspect.getfullargspec(func).args  # PY3
    except AttributeError:
        func_args = inspect.getargspec(func).args  # PY2
    if len(func_args) < argcount:
        @functools.wraps(func)
        def newfunc(*args, **kwargs):
            # Drop the trailing positional (the context) unless the caller
            # explicitly supplied a 'context' kwarg.  The sentinel replaces
            # the old ``kwargs.pop('context', 0) is 0`` test, which compared
            # an int literal by identity (SyntaxWarning on Python >= 3.8 and
            # dependent on CPython's small-int caching).
            if not kwargs or kwargs.pop('context', _CONTEXT_MISSING) is _CONTEXT_MISSING:
                args = args[:-1]
            return func(*args, **kwargs)
        return newfunc
    return func
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/validate.py
0.841598
0.490175
validate.py
pypi
from __future__ import unicode_literals, absolute_import

from collections import namedtuple

from .compat import iteritems
from .undefined import Undefined

try:
    # optional type checking
    import typing
    if typing.TYPE_CHECKING:
        from typing import Mapping, Tuple, Callable, Optional, Any, Iterable
        from .schema import Schema
except ImportError:
    pass


Atom = namedtuple('Atom', ('name', 'field', 'value'))
Atom.__new__.__defaults__ = (None,) * len(Atom._fields)


def atoms(schema, mapping, keys=tuple(Atom._fields), filter=None):
    # type: (Schema, Mapping, Tuple[str, str, str], Optional[Callable[[Atom], bool]]) -> Iterable[Atom]
    """
    Yield an ``Atom`` (field name, field descriptor, current value) for every
    field defined on ``schema``.

    :type schema: schematics.schema.Schema
    :param schema:
        The Schema definition.
    :type mapping: Mapping
    :param mapping:
        The structure where fields from schema are mapped to values. The only
        expectation for this structure is that it implements a ``Mapping``
        interface.
    :type keys: Tuple[str, str, str]
    :param keys:
        Tuple selecting which members of each yielded ``Atom`` are populated;
        unselected members are ``None``. Valid entries are ``'name'``,
        ``'field'`` and ``'value'``. Invalid entries raise ``TypeError``.
    :type filter: Optional[Callable[[Atom], bool]]
    :param filter:
        Predicate deciding whether an atom is yielded; ``None`` yields all.

    :rtype: Iterable[Atom]
    """
    if not set(keys).issubset(Atom._fields):
        raise TypeError('invalid key specified')

    wants_name = 'name' in keys
    wants_field = 'field' in keys
    wants_value = (mapping is not None) and ('value' in keys)

    for field_name, field in iteritems(schema.fields):
        current_value = Undefined
        if wants_value:
            try:
                current_value = mapping[field_name]
            except Exception:
                # Any lookup failure is treated as "no value set".
                current_value = Undefined

        atom = Atom(
            name=field_name if wants_name else None,
            field=field if wants_field else None,
            value=current_value,
        )
        if filter is None or filter(atom):
            yield atom


class atom_filter:
    """Group for the default filter functions."""

    @staticmethod
    def has_setter(atom):
        return getattr(atom.field, 'fset', None) is not None

    @staticmethod
    def not_setter(atom):
        return not atom_filter.has_setter(atom)
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/iteration.py
0.899635
0.335514
iteration.py
pypi
from .compat import str_compat, repr_compat

try:
    from collections.abc import Set  # PY3
except ImportError:
    from collections import Set  # PY2


@repr_compat
@str_compat
class Role(Set):
    """
    A ``Role`` object can be used to filter specific fields against a
    sequence.

    A ``Role`` pairs a set of field names with a filter function. The
    function receives a field name and answers ``True`` when that field
    should be skipped, ``False`` otherwise.

    A ``Role`` behaves as a ``Set`` over the field names it has an opinion
    on. When roles are combined with other roles, only the filtering
    behavior of the first role is kept.
    """

    def __init__(self, function, fields):
        self.function = function
        self.fields = set(fields)

    def _from_iterable(self, iterable):
        return Role(self.function, iterable)

    def __contains__(self, value):
        return value in self.fields

    def __iter__(self):
        return iter(self.fields)

    def __len__(self):
        return len(self.fields)

    def __eq__(self, other):
        return (self.function.__name__ == other.function.__name__
                and self.fields == other.fields)

    def __str__(self):
        quoted = ', '.join("'%s'" % f for f in self.fields)
        return '%s(%s)' % (self.function.__name__, quoted)

    def __repr__(self):
        return '<Role %s>' % str(self)

    # --- editing role fields ---

    def __add__(self, other):
        return self._from_iterable(self.fields.union(other))

    def __sub__(self, other):
        return self._from_iterable(self.fields.difference(other))

    # --- applying the role to a field ---

    def __call__(self, name, value):
        return self.function(name, value, self.fields)

    # --- static filter functions ---

    @staticmethod
    def wholelist(name, value, seq):
        """
        Accepts a field name, value, and a field list.  Never requests that
        a field be skipped, i.e. accepts every field.

        :param name:
            The field name to inspect.
        :param value:
            The field's value.
        :param seq:
            The list of fields associated with the ``Role``.
        """
        return False

    @staticmethod
    def whitelist(name, value, seq):
        """
        Implements the behavior of a whitelist by requesting a field be
        skipped whenever its name is not in the list of fields.

        :param name:
            The field name to inspect.
        :param value:
            The field's value.
        :param seq:
            The list of fields associated with the ``Role``.
        """
        if seq is not None and len(seq) > 0:
            return name not in seq
        return True

    @staticmethod
    def blacklist(name, value, seq):
        """
        Implements the behavior of a blacklist by requesting a field be
        skipped whenever its name is found in the list of fields.

        :param name:
            The field name to inspect.
        :param value:
            The field's value.
        :param seq:
            The list of fields associated with the ``Role``.
        """
        if seq is not None and len(seq) > 0:
            return name in seq
        return False
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/role.py
0.857872
0.576482
role.py
pypi
from __future__ import unicode_literals, absolute_import

from .compat import *

try:
    from collections.abc import Mapping, Sequence  # PY3
except ImportError:
    from collections import Mapping, Sequence  # PY2

__all__ = []


class DataObject(object):
    """
    An object for holding data as attributes.

    ``DataObject`` can be instantiated like ``dict``::

        >>> d = DataObject({'one': 1, 'two': 2}, three=3)
        >>> d.__dict__
        {'one': 1, 'two': 2, 'three': 3}

    Attributes are accessible via the regular dot notation (``d.x``) as well
    as the subscription syntax (``d['x']``)::

        >>> d.one == d['one'] == 1
        True

    To convert a ``DataObject`` into a dictionary, use ``d._to_dict()``.

    ``DataObject`` implements the following collection-like operations:

    * iteration through attributes as name-value pairs
    * ``'x' in d`` for membership tests
    * ``len(d)`` to get the number of attributes

    Additionally, the following methods are equivalent to their ``dict``
    counterparts: ``_clear``, ``_get``, ``_keys``, ``_items``, ``_pop``,
    ``_setdefault``, ``_update``.

    An advantage of ``DataObject`` over ``dict`` subclasses is that every
    method name in ``DataObject`` begins with an underscore, so attributes
    like ``"update"`` or ``"values"`` are valid.
    """

    def __init__(self, *args, **kwargs):
        source = args[0] if args else {}
        self._update(source, **kwargs)

    def __repr__(self):
        return self.__class__.__name__ + '(%s)' % repr(self.__dict__)

    def _copy(self):
        return self.__class__(self)

    __copy__ = _copy

    def __eq__(self, other):
        return isinstance(other, DataObject) and self.__dict__ == other.__dict__

    def __iter__(self):
        return iter(self.__dict__.items())

    def _update(self, source=None, **kwargs):
        if isinstance(source, DataObject):
            source = source.__dict__
        self.__dict__.update(source, **kwargs)

    def _setdefaults(self, source):
        if isinstance(source, dict):
            source = source.items()
        for name, value in source:
            self._setdefault(name, value)
        return self

    def _to_dict(self):
        # Recursively convert nested DataObjects as well.
        d = dict(self.__dict__)
        for k, v in d.items():
            if isinstance(v, DataObject):
                d[k] = v._to_dict()
        return d

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __getitem__(self, key):
        return self.__dict__[key]

    def __delitem__(self, key):
        del self.__dict__[key]

    def __len__(self):
        return len(self.__dict__)

    def __contains__(self, key):
        return key in self.__dict__

    def _clear(self):
        return self.__dict__.clear()

    def _get(self, *args):
        return self.__dict__.get(*args)

    def _items(self):
        return self.__dict__.items()

    def _keys(self):
        return self.__dict__.keys()

    def _pop(self, *args):
        return self.__dict__.pop(*args)

    def _setdefault(self, *args):
        return self.__dict__.setdefault(*args)


class Context(DataObject):
    """A ``DataObject`` restricted to the names listed in ``_fields`` whose
    attributes are write-once."""

    _fields = ()

    def __init__(self, *args, **kwargs):
        super(Context, self).__init__(*args, **kwargs)
        if self._fields:
            unknowns = [name for name in self._keys() if name not in self._fields]
            if unknowns:
                raise ValueError('Unexpected field names: %r' % unknowns)

    @classmethod
    def _new(cls, *args, **kwargs):
        if len(args) > len(cls._fields):
            raise TypeError('Too many positional arguments')
        return cls(zip(cls._fields, args), **kwargs)

    @classmethod
    def _make(cls, obj):
        if obj is None:
            return cls()
        elif isinstance(obj, cls):
            return obj
        else:
            return cls(obj)

    def __setattr__(self, name, value):
        # Fields are write-once.
        if name in self:
            raise TypeError("Field '{0}' already set".format(name))
        super(Context, self).__setattr__(name, value)

    def _branch(self, **kwargs):
        # Return a derived context carrying only the options that actually
        # change; reuse self when nothing does.
        if not kwargs:
            return self
        items = dict(((k, v) for k, v in kwargs.items()
                      if v is not None and v != self[k]))
        if items:
            return self.__class__(self, **items)
        else:
            return self

    def _setdefaults(self, source):
        if not isinstance(source, dict):
            source = source.__dict__
        new_values = source.copy()
        new_values.update(self.__dict__)
        self.__dict__.update(new_values)
        return self

    def __bool__(self):
        return True

    __nonzero__ = __bool__


try:
    from collections import ChainMap
except ImportError:
    # Code extracted from CPython 3 stdlib:
    # https://github.com/python/cpython/blob/85f2c89ee8223590ba08e3aea97476f76c7e3734/Lib/collections/__init__.py#L852
    from collections import MutableMapping

    class ChainMap(MutableMapping):
        ''' A ChainMap groups multiple dicts (or other mappings) together
        to create a single, updateable view.

        The underlying mappings are stored in a list.  That list is public and can
        be accessed or updated using the *maps* attribute.  There is no other
        state.

        Lookups search the underlying mappings successively until a key is found.
        In contrast, writes, updates, and deletions only operate on the first
        mapping.
        '''

        def __init__(self, *maps):
            '''Initialize a ChainMap by setting *maps* to the given mappings.
            If no mappings are provided, a single empty dictionary is used.
            '''
            self.maps = list(maps) or [{}]  # always at least one map

        def __missing__(self, key):
            raise KeyError(key)

        def __getitem__(self, key):
            for mapping in self.maps:
                try:
                    return mapping[key]  # can't use 'key in mapping' with defaultdict
                except KeyError:
                    pass
            return self.__missing__(key)  # support subclasses that define __missing__

        def get(self, key, default=None):
            return self[key] if key in self else default

        def __len__(self):
            return len(set().union(*self.maps))  # reuses stored hash values if possible

        def __iter__(self):
            return iter(set().union(*self.maps))

        def __contains__(self, key):
            return any(key in m for m in self.maps)

        def __bool__(self):
            return any(self.maps)

        # @_recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))

        @classmethod
        def fromkeys(cls, iterable, *args):
            'Create a ChainMap with a single dict created from the iterable.'
            return cls(dict.fromkeys(iterable, *args))

        def copy(self):
            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
            return self.__class__(self.maps[0].copy(), *self.maps[1:])

        __copy__ = copy

        def new_child(self, m=None):  # like Django's Context.push()
            '''New ChainMap with a new map followed by all previous maps.
            If no map is provided, an empty dict is used.
            '''
            if m is None:
                m = {}
            return self.__class__(m, *self.maps)

        @property
        def parents(self):  # like Django's Context.pop()
            'New ChainMap from maps[1:].'
            return self.__class__(*self.maps[1:])

        def __setitem__(self, key, value):
            self.maps[0][key] = value

        def __delitem__(self, key):
            try:
                del self.maps[0][key]
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
            try:
                return self.maps[0].popitem()
            except KeyError:
                raise KeyError('No keys found in the first mapping.')

        def pop(self, key, *args):
            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
            try:
                return self.maps[0].pop(key, *args)
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def clear(self):
            'Clear maps[0], leaving maps[1:] intact.'
            self.maps[0].clear()


try:
    from types import MappingProxyType
except ImportError:
    from collections import Mapping

    class MappingProxyType(Mapping):
        """Read-only live view over an underlying mapping (PY2 fallback)."""

        def __init__(self, map):
            self._map = map

        def __len__(self):
            return len(self._map)

        def __iter__(self):
            return iter(self._map)

        def __getitem__(self, key):
            return self._map[key]

        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(self, self._map)


class FrozenDict(Mapping):
    """Immutable, hashable snapshot of a mapping."""

    def __init__(self, value):
        self._value = dict(value)

    def __getitem__(self, key):
        return self._value[key]

    def __iter__(self):
        return iter(self._value)

    def __len__(self):
        return len(self._value)

    def __hash__(self):
        # Lazily computed and cached on first use.
        if not hasattr(self, "_hash"):
            _hash = 0
            for k, v in self._value.items():
                _hash ^= hash(k)
                _hash ^= hash(v)
            self._hash = _hash
        return self._hash

    def __repr__(self):
        return repr(self._value)

    def __str__(self):
        return str(self._value)


class FrozenList(Sequence):
    """Immutable, hashable snapshot of a sequence."""

    def __init__(self, value):
        self._list = list(value)

    def __getitem__(self, index):
        return self._list[index]

    def __len__(self):
        return len(self._list)

    def __hash__(self):
        # Lazily computed and cached on first use.
        if not hasattr(self, "_hash"):
            _hash = 0
            for e in self._list:
                _hash ^= hash(e)
            self._hash = _hash
        return self._hash

    def __repr__(self):
        return repr(self._list)

    def __str__(self):
        return str(self._list)

    def __eq__(self, other):
        if len(self) != len(other):
            return False
        for i in range(len(self)):
            if self[i] != other[i]:
                return False
        return True
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/datastructures.py
0.85166
0.312632
datastructures.py
pypi
from __future__ import unicode_literals, absolute_import

import json

from .common import *
from .compat import string_type, str_compat
from .datastructures import FrozenDict, FrozenList
from .translator import LazyText

try:
    from collections.abc import Mapping, Sequence  # PY3
except ImportError:
    from collections import Mapping, Sequence  # PY2

__all__ = [
    'BaseError', 'ErrorMessage', 'FieldError', 'ConversionError',
    'ValidationError', 'StopValidationError', 'CompoundError', 'DataError',
    'MockCreationError', 'UndefinedValueError', 'UnknownFieldError']


@str_compat
class BaseError(Exception):

    def __init__(self, errors):
        """
        The base class for all Schematics errors.

        message should be a human-readable message,
        while errors is a machine-readable list, or dictionary.

        if None is passed as the message, and error is populated,
        the primitive representation will be serialized.

        the Python logging module expects exceptions to be hashable
        and therefore immutable. As a result, it is not possible
        to mutate BaseError's error list or dict after initialization.
        """
        errors = self._freeze(errors)
        super(BaseError, self).__init__(errors)

    @property
    def errors(self):
        # The frozen errors are stored as the single Exception arg.
        return self.args[0]

    def to_primitive(self):
        """
        converts the errors dict to a primitive representation of dicts,
        list and strings.
        """
        if not hasattr(self, "_primitive"):
            self._primitive = self._to_primitive(self.errors)
        return self._primitive

    @staticmethod
    def _freeze(obj):
        """ freeze common data structures to something immutable. """
        if isinstance(obj, dict):
            return FrozenDict(obj)
        elif isinstance(obj, list):
            return FrozenList(obj)
        else:
            return obj

    @classmethod
    def _to_primitive(cls, obj):
        """ recursive to_primitive for basic data types. """
        if isinstance(obj, string_type):
            return obj
        if isinstance(obj, Sequence):
            return [cls._to_primitive(e) for e in obj]
        elif isinstance(obj, Mapping):
            return dict(
                (k, cls._to_primitive(v)) for k, v in obj.items()
            )
        else:
            return str(obj)

    def __str__(self):
        return json.dumps(self.to_primitive())

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.errors))

    def __hash__(self):
        return hash(self.errors)

    def __eq__(self, other):
        # Same concrete type: compare error structures; otherwise compare
        # our errors directly against the other object (e.g. a plain dict).
        # NOTE: a trailing unreachable ``return False`` was removed here --
        # both branches of the if/else already return.
        if type(self) is type(other):
            return self.errors == other.errors
        else:
            return self.errors == other

    def __ne__(self, other):
        return not (self == other)


@str_compat
class ErrorMessage(object):
    """A single human-readable error with optional machine-readable info."""

    def __init__(self, summary, info=None):
        self.type = None  # filled in by FieldError with the error class
        self.summary = summary
        self.info = info

    def __repr__(self):
        return "%s(%s, %s)" % (
            self.__class__.__name__,
            repr(self.summary),
            repr(self.info)
        )

    def __str__(self):
        if self.info:
            return '%s: %s' % (self.summary, self._info_as_str())
        else:
            return '%s' % self.summary

    def _info_as_str(self):
        if isinstance(self.info, int):
            return str(self.info)
        elif isinstance(self.info, string_type):
            return '"%s"' % self.info
        else:
            return str(self.info)

    def __eq__(self, other):
        if isinstance(other, ErrorMessage):
            return (
                self.summary == other.summary and
                self.type == other.type and
                self.info == other.info
            )
        elif isinstance(other, string_type):
            return self.summary == other
        else:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.summary, self.type, self.info))


class FieldError(BaseError, Sequence):
    """Abstract base for per-field errors; behaves as a sequence of
    ``ErrorMessage`` objects. Raise ``ConversionError`` or
    ``ValidationError`` instead of this class directly."""

    type = None

    def __init__(self, *args, **kwargs):
        if type(self) is FieldError:
            raise NotImplementedError("Please raise either ConversionError or ValidationError.")
        if len(args) == 0:
            raise TypeError("Please provide at least one error or error message.")
        # Normalize the inputs into a list of candidate items.
        if kwargs:
            items = [ErrorMessage(*args, **kwargs)]
        elif len(args) == 1:
            arg = args[0]
            if isinstance(arg, list):
                items = list(arg)
            else:
                items = [arg]
        else:
            items = args
        # Coerce every item into an ErrorMessage.
        errors = []
        for item in items:
            if isinstance(item, (string_type, LazyText)):
                errors.append(ErrorMessage(str(item)))
            elif isinstance(item, tuple):
                errors.append(ErrorMessage(*item))
            elif isinstance(item, ErrorMessage):
                errors.append(item)
            elif isinstance(item, self.__class__):
                errors.extend(item.errors)
            else:
                raise TypeError("'{0}()' object is neither a {1} nor an error message."\
                                .format(type(item).__name__, type(self).__name__))
        for error in errors:
            error.type = self.type or type(self)
        super(FieldError, self).__init__(errors)

    def __contains__(self, value):
        return value in self.errors

    def __getitem__(self, index):
        return self.errors[index]

    def __iter__(self):
        return iter(self.errors)

    def __len__(self):
        return len(self.errors)


class ConversionError(FieldError, TypeError):
    """ Exception raised when data cannot be converted to the correct python type """
    pass


class ValidationError(FieldError, ValueError):
    """Exception raised when invalid data is encountered."""
    pass


class StopValidationError(ValidationError):
    """Exception raised when no more validation need occur."""
    type = ValidationError


class CompoundError(BaseError):
    """Maps field names to the errors raised for them."""

    def __init__(self, errors):
        if not isinstance(errors, dict):
            raise TypeError("Compound errors must be reported as a dictionary.")
        for key, value in errors.items():
            if isinstance(value, CompoundError):
                errors[key] = value.errors
            else:
                errors[key] = value
        super(CompoundError, self).__init__(errors)


class DataError(CompoundError):
    """A ``CompoundError`` that also carries the partially converted data."""

    def __init__(self, errors, partial_data=None):
        super(DataError, self).__init__(errors)
        self.partial_data = partial_data


class MockCreationError(ValueError):
    """Exception raised when a mock value cannot be generated."""
    pass


class UndefinedValueError(AttributeError, KeyError):
    """Exception raised when accessing a field with an undefined value."""
    def __init__(self, model, name):
        msg = "'%s' instance has no value for field '%s'" % (model.__class__.__name__, name)
        super(UndefinedValueError, self).__init__(msg)


class UnknownFieldError(KeyError):
    """Exception raised when attempting to access a nonexistent field using the subscription syntax."""
    def __init__(self, model, name):
        msg = "Model '%s' has no field named '%s'" % (model.__class__.__name__, name)
        super(UnknownFieldError, self).__init__(msg)


if PY2:
    # Python 2 names cannot be unicode
    __all__ = [n.encode('ascii') for n in __all__]
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/exceptions.py
0.834036
0.178633
exceptions.py
pypi
import functools

from ..transforms import convert, to_primitive
from ..validate import validate


def _callback_wrap(data, schema, transform, *args, **kwargs):
    # Adapter: the machine supplies (data, schema), transforms expect
    # (schema, data).
    return transform(schema, data, *args, **kwargs)


class Machine(object):
    """
    A poor man's state machine.

    Triggers (``init``, ``convert``, ``validate``, ``serialize``) advance the
    machine along the fixed pipeline raw -> converted -> validated ->
    serialized, running the matching transform on ``self.data`` as they fire.
    """

    states = ('raw', 'converted', 'validated', 'serialized')
    transitions = (
        {'trigger': 'init', 'to': 'raw'},
        {'trigger': 'convert', 'from': 'raw', 'to': 'converted'},
        {'trigger': 'validate', 'from': 'converted', 'to': 'validated'},
        {'trigger': 'serialize', 'from': 'validated', 'to': 'serialized'},
    )
    callbacks = {
        'convert': functools.partial(_callback_wrap, transform=convert, partial=True),
        'validate': functools.partial(_callback_wrap, transform=validate, convert=False, partial=False),
        'serialize': functools.partial(_callback_wrap, transform=to_primitive),
    }

    def __init__(self, data, *args):
        self.state = self._transition(trigger='init')['to']
        self.data = data
        self.args = args

    def __getattr__(self, name):
        # Unknown attribute access becomes a trigger call, e.g. m.convert().
        return functools.partial(self.trigger, name)

    def _transition(self, trigger=None, src_state=None, dst_state=None):
        # First matching transition, or None when nothing matches.
        try:
            return next(self._transitions(trigger=trigger, src_state=src_state,
                                          dst_state=dst_state))
        except StopIteration:
            return None

    def _transitions(self, trigger=None, src_state=None, dst_state=None):
        def matches(entry, key, wanted):
            # A None criterion is a wildcard.
            return entry.get(key) == wanted if wanted is not None else True

        return (entry for entry in self.transitions
                if matches(entry, 'trigger', trigger)
                and matches(entry, 'from', src_state)
                and matches(entry, 'to', dst_state))

    def trigger(self, trigger):
        transition = self._transition(trigger=trigger, src_state=self.state)
        if not transition:
            raise AttributeError(trigger)
        callback = self.callbacks.get(trigger)
        if callback:
            self.data = callback(self.data, *self.args)
        self.state = transition['to']

    def can(self, state):
        return bool(self._transition(src_state=self.state, dst_state=state))

    def cannot(self, state):
        return not self.can(state)
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/contrib/machine.py
0.615666
0.265556
machine.py
pypi
from __future__ import unicode_literals, absolute_import try: from enum import Enum except ImportError: pass from ..exceptions import ConversionError from ..translator import _ from ..types import BaseType from ..compat import string_type class EnumType(BaseType): """A field type allowing to use native enums as values. Restricts values to enum members and (optionally) enum values. `use_values` - if set to True allows do assign enumerated values to the field. >>> import enum >>> class E(enum.Enum): ... A = 1 ... B = 2 >>> from schematics import Model >>> class AModel(Model): ... foo = EnumType(E) >>> a = AModel() >>> a.foo = E.A >>> a.foo.value == 1 """ MESSAGES = { 'convert': _("Couldn't interpret '{0}' as member of {1}."), } def __init__(self, enum, use_values=False, **kwargs): """ :param enum: Enum class to which restrict values assigned to the field. :param use_values: If true, also values of the enum (right-hand side) can be assigned here. Other args are passed to superclass. """ self._enum_class = enum self._use_values = use_values super(EnumType, self).__init__(**kwargs) def to_native(self, value, context=None): if isinstance(value, self._enum_class): return value else: by_name = self._find_by_name(value) if by_name: return by_name by_value = self._find_by_value(value) if by_value: return by_value raise ConversionError(self.messages['convert'].format(value, self._enum_class)) def _find_by_name(self, value): if isinstance(value, string_type): try: return self._enum_class[value] except KeyError: pass def _find_by_value(self, value): if not self._use_values: return for member in self._enum_class: if member.value == value: return member def to_primitive(self, value, context=None): if isinstance(value, Enum): if self._use_values: return value.value else: return value.name else: return str(value)
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/contrib/enum_type.py
0.798933
0.265065
enum_type.py
pypi
from __future__ import unicode_literals, absolute_import import copy import datetime import decimal import itertools import numbers import random import re import string import uuid from collections import OrderedDict from ..common import * from ..exceptions import * from ..translator import _ from ..undefined import Undefined from ..util import listify from ..validate import prepare_validator, get_validation_context try: import typing except ImportError: pass try: from collections.abc import Iterable # PY3 except ImportError: from collections import Iterable # PY2 __all__ = [ 'BaseType', 'UUIDType', 'StringType', 'MultilingualStringType', 'NumberType', 'IntType', 'LongType', 'FloatType', 'DecimalType', 'HashType', 'MD5Type', 'SHA1Type', 'BooleanType', 'GeoPointType', 'DateType', 'DateTimeType', 'UTCDateTimeType', 'TimestampType', 'TimedeltaType'] def fill_template(template, min_length, max_length): return template % random_string( get_value_in( min_length, max_length, padding=len(template) - 2, required_length=1)) def get_range_endpoints(min_length, max_length, padding=0, required_length=0): if min_length is None: min_length = 0 if max_length is None: max_length = max(min_length * 2, 16) if padding: max_length = max_length - padding min_length = max(min_length - padding, 0) if max_length < required_length: raise MockCreationError( 'This field is too short to hold the mock data') min_length = max(min_length, required_length) if max_length < min_length: raise MockCreationError('Minimum is greater than maximum') return min_length, max_length def get_value_in(min_length, max_length, padding=0, required_length=0): return random.randint( *get_range_endpoints(min_length, max_length, padding, required_length)) _alphanumeric = string.ascii_letters + string.digits def random_string(length, chars=_alphanumeric): return ''.join(random.choice(chars) for _ in range(length)) _last_position_hint = -1 _next_position_hint = itertools.count() class TypeMeta(type): """ Meta class for 
BaseType. Merges `MESSAGES` dict and accumulates validator methods. """ def __new__(mcs, name, bases, attrs): messages = {} validators = OrderedDict() for base in reversed(bases): if hasattr(base, 'MESSAGES'): messages.update(base.MESSAGES) if hasattr(base, "_validators"): validators.update(base._validators) if 'MESSAGES' in attrs: messages.update(attrs['MESSAGES']) attrs['MESSAGES'] = messages for attr_name, attr in attrs.items(): if attr_name.startswith("validate_"): validators[attr_name] = 1 attrs[attr_name] = prepare_validator(attr, 3) attrs["_validators"] = validators return type.__new__(mcs, name, bases, attrs) @metaclass(TypeMeta) class BaseType(object): """A base class for Types in a Schematics model. Instances of this class may be added to subclasses of ``Model`` to define a model schema. Validators that need to access variables on the instance can be defined be implementing methods whose names start with ``validate_`` and accept one parameter (in addition to ``self``) :param required: Invalidate field when value is None or is not supplied. Default: False. :param default: When no data is provided default to this value. May be a callable. Default: None. :param serialized_name: The name of this field defaults to the class attribute used in the model. However if the field has another name in foreign data set this argument. Serialized data will use this value for the key name too. :param deserialize_from: A name or list of named fields for which foreign data sets are searched to provide a value for the given field. This only effects inbound data. :param choices: A list of valid choices. This is the last step of the validator chain. :param validators: A list of callables. Each callable receives the value after it has been converted into a rich python type. Default: [] :param serialize_when_none: Dictates if the field should appear in the serialized data even if the value is None. Default: None. :param messages: Override the error messages with a dict. 
You can also do this by subclassing the Type and defining a `MESSAGES` dict attribute on the class. A metaclass will merge all the `MESSAGES` and override the resulting dict with instance level `messages` and assign to `self.messages`. :param metadata: Dictionary for storing custom metadata associated with the field. To encourage compatibility with external tools, we suggest these keys for common metadata: - *label* : Brief human-readable label - *description* : Explanation of the purpose of the field. Used for help, tooltips, documentation, etc. """ primitive_type = None native_type = None MESSAGES = { 'required': _("This field is required."), 'choices': _("Value must be one of {0}."), } EXPORT_METHODS = { NATIVE: 'to_native', PRIMITIVE: 'to_primitive', } def __init__(self, required=False, default=Undefined, serialized_name=None, choices=None, validators=None, deserialize_from=None, export_level=None, serialize_when_none=None, messages=None, metadata=None): super(BaseType, self).__init__() self.required = required self._default = default self.serialized_name = serialized_name if choices and (isinstance(choices, string_type) or not isinstance(choices, Iterable)): raise TypeError('"choices" must be a non-string Iterable') self.choices = choices self.deserialize_from = listify(deserialize_from) self.validators = [getattr(self, validator_name) for validator_name in self._validators] if validators: self.validators += (prepare_validator(func, 2) for func in validators) self._set_export_level(export_level, serialize_when_none) self.messages = dict(self.MESSAGES, **(messages or {})) self.metadata = metadata or {} self._position_hint = next(_next_position_hint) # For ordering of fields self.name = None self.owner_model = None self.parent_field = None self.typeclass = self.__class__ self.is_compound = False self.export_mapping = dict( (format, getattr(self, fname)) for format, fname in self.EXPORT_METHODS.items()) def __repr__(self): type_ = "%s(%s) instance" % 
(self.__class__.__name__, self._repr_info() or '') model = " on %s" % self.owner_model.__name__ if self.owner_model else '' field = " as '%s'" % self.name if self.name else '' return "<%s>" % (type_ + model + field) def _repr_info(self): return None def __call__(self, value, context=None): return self.convert(value, context) def __deepcopy__(self, memo): return copy.copy(self) def _mock(self, context=None): return None def _setup(self, field_name, owner_model): """Perform late-stage setup tasks that are run after the containing model has been created. """ self.name = field_name self.owner_model = owner_model self._input_keys = self._get_input_keys() def _set_export_level(self, export_level, serialize_when_none): if export_level is not None: self.export_level = export_level elif serialize_when_none is True: self.export_level = DEFAULT elif serialize_when_none is False: self.export_level = NONEMPTY else: self.export_level = None def get_export_level(self, context): if self.owner_model: level = self.owner_model._options.export_level else: level = DEFAULT if self.export_level is not None: level = self.export_level if context.export_level is not None: level = context.export_level return level def get_input_keys(self, mapping=None): if mapping: return self._get_input_keys(mapping) else: return self._input_keys def _get_input_keys(self, mapping=None): input_keys = [self.name] if self.serialized_name: input_keys.append(self.serialized_name) if mapping and self.name in mapping: input_keys.extend(listify(mapping[self.name])) if self.deserialize_from: input_keys.extend(self.deserialize_from) return input_keys @property def default(self): default = self._default if callable(default): default = default() return default def pre_setattr(self, value): return value def convert(self, value, context=None): return self.to_native(value, context) def export(self, value, format, context=None): return self.export_mapping[format](value, context) def to_primitive(self, value, context=None): 
"""Convert internal data to a value safe to serialize. """ return value def to_native(self, value, context=None): """ Convert untrusted data to a richer Python construct. """ return value def validate(self, value, context=None): """ Validate the field and return a converted value or raise a ``ValidationError`` with a list of errors raised by the validation chain. Stop the validation process from continuing through the validators by raising ``StopValidationError`` instead of ``ValidationError``. """ context = context or get_validation_context() if context.convert: value = self.convert(value, context) elif self.is_compound: self.convert(value, context) errors = [] for validator in self.validators: try: validator(value, context) except ValidationError as exc: errors.append(exc) if isinstance(exc, StopValidationError): break if errors: raise ValidationError(errors) return value def check_required(self, value, context): if self.required and (value is None or value is Undefined): if self.name is None or context and not context.partial: raise ConversionError(self.messages['required']) def validate_choices(self, value, context): if self.choices is not None: if value not in self.choices: raise ValidationError(self.messages['choices'].format(str(self.choices))) def mock(self, context=None): if not self.required and not random.choice([True, False]): return self.default if self.choices is not None: return random.choice(self.choices) return self._mock(context) class UUIDType(BaseType): """A field that stores a valid UUID value. """ primitive_type = str native_type = uuid.UUID MESSAGES = { 'convert': _("Couldn't interpret '{0}' value as UUID."), } def __init__(self, **kwargs): # type: (...) 
-> uuid.UUID super(UUIDType, self).__init__(**kwargs) def _mock(self, context=None): return uuid.uuid4() def to_native(self, value, context=None): if not isinstance(value, uuid.UUID): try: value = uuid.UUID(value) except (TypeError, ValueError): raise ConversionError(self.messages['convert'].format(value)) return value def to_primitive(self, value, context=None): return str(value) class StringType(BaseType): """A Unicode string field.""" primitive_type = str native_type = str allow_casts = (int, bytes) MESSAGES = { 'convert': _("Couldn't interpret '{0}' as string."), 'decode': _("Invalid UTF-8 data."), 'max_length': _("String value is too long."), 'min_length': _("String value is too short."), 'regex': _("String value did not match validation regex."), } def __init__(self, regex=None, max_length=None, min_length=None, **kwargs): # type: (...) -> typing.Text self.regex = re.compile(regex) if regex else None self.max_length = max_length self.min_length = min_length super(StringType, self).__init__(**kwargs) def _mock(self, context=None): return random_string(get_value_in(self.min_length, self.max_length)) def to_native(self, value, context=None): if isinstance(value, str): return value if isinstance(value, self.allow_casts): if isinstance(value, bytes): try: return str(value, 'utf-8') except UnicodeError: raise ConversionError(self.messages['decode'].format(value)) elif isinstance(value, bool): pass else: return str(value) raise ConversionError(self.messages['convert'].format(value)) def validate_length(self, value, context=None): length = len(value) if self.max_length is not None and length > self.max_length: raise ValidationError(self.messages['max_length']) if self.min_length is not None and length < self.min_length: raise ValidationError(self.messages['min_length']) def validate_regex(self, value, context=None): if self.regex is not None and self.regex.match(value) is None: raise ValidationError(self.messages['regex']) class NumberType(BaseType): """A generic 
number field. Converts to and validates against `number_type` parameter. """ primitive_type = None native_type = None number_type = None MESSAGES = { 'number_coerce': _("Value '{0}' is not {1}."), 'number_min': _("{0} value should be greater than or equal to {1}."), 'number_max': _("{0} value should be less than or equal to {1}."), } def __init__(self, min_value=None, max_value=None, strict=False, **kwargs): # type: (...) -> typing.Union[int, float] self.min_value = min_value self.max_value = max_value self.strict = strict super(NumberType, self).__init__(**kwargs) def _mock(self, context=None): number = random.uniform( *get_range_endpoints(self.min_value, self.max_value) ) return self.native_type(number) if self.native_type else number def to_native(self, value, context=None): if isinstance(value, bool): value = int(value) if isinstance(value, self.native_type): return value try: native_value = self.native_type(value) except (TypeError, ValueError): pass else: if self.native_type is float: # Float conversion is strict enough. return native_value if not self.strict and native_value == value: # Match numeric types. return native_value if isinstance(value, (string_type, numbers.Integral)): return native_value raise ConversionError(self.messages['number_coerce'] .format(value, self.number_type.lower())) def validate_range(self, value, context=None): if self.min_value is not None and value < self.min_value: raise ValidationError(self.messages['number_min'] .format(self.number_type, self.min_value)) if self.max_value is not None and value > self.max_value: raise ValidationError(self.messages['number_max'] .format(self.number_type, self.max_value)) return value class IntType(NumberType): """A field that validates input as an Integer """ primitive_type = int native_type = int number_type = 'Int' def __init__(self, **kwargs): # type: (...) 
-> int super(IntType, self).__init__(**kwargs) LongType = IntType class FloatType(NumberType): """A field that validates input as a Float """ primitive_type = float native_type = float number_type = 'Float' def __init__(self, **kwargs): # type: (...) -> float super(FloatType, self).__init__(**kwargs) class DecimalType(NumberType): """A fixed-point decimal number field. """ primitive_type = str native_type = decimal.Decimal number_type = 'Decimal' def to_primitive(self, value, context=None): return str(value) def to_native(self, value, context=None): if isinstance(value, decimal.Decimal): return value if not isinstance(value, (string_type, bool)): value = str(value) try: value = decimal.Decimal(value) except (TypeError, decimal.InvalidOperation): raise ConversionError(self.messages['number_coerce'].format( value, self.number_type.lower())) return value class HashType(StringType): MESSAGES = { 'hash_length': _("Hash value is wrong length."), 'hash_hex': _("Hash value is not hexadecimal."), } def _mock(self, context=None): return random_string(self.LENGTH, string.hexdigits) def to_native(self, value, context=None): value = super(HashType, self).to_native(value, context) if len(value) != self.LENGTH: raise ValidationError(self.messages['hash_length']) try: int(value, 16) except ValueError: raise ConversionError(self.messages['hash_hex']) return value class MD5Type(HashType): """A field that validates input as resembling an MD5 hash. """ LENGTH = 32 class SHA1Type(HashType): """A field that validates input as resembling an SHA1 hash. """ LENGTH = 40 class BooleanType(BaseType): """A boolean field type. In addition to ``True`` and ``False``, coerces these values: + For ``True``: "True", "true", "1" + For ``False``: "False", "false", "0" """ primitive_type = bool native_type = bool TRUE_VALUES = ('True', 'true', '1') FALSE_VALUES = ('False', 'false', '0') def __init__(self, **kwargs): # type: (...) 
-> bool super(BooleanType, self).__init__(**kwargs) def _mock(self, context=None): return random.choice([True, False]) def to_native(self, value, context=None): if isinstance(value, string_type): if value in self.TRUE_VALUES: value = True elif value in self.FALSE_VALUES: value = False elif isinstance(value, int) and value in [0, 1]: value = bool(value) if not isinstance(value, bool): raise ConversionError(_("Must be either true or false.")) return value class DateType(BaseType): """Defaults to converting to and from ISO8601 date values. """ primitive_type = str native_type = datetime.date SERIALIZED_FORMAT = '%Y-%m-%d' MESSAGES = { 'parse': _("Could not parse {0}. Should be ISO 8601 (YYYY-MM-DD)."), 'parse_formats': _('Could not parse {0}. Valid formats: {1}'), } def __init__(self, formats=None, **kwargs): # type: (...) -> datetime.date if formats: self.formats = listify(formats) self.conversion_errmsg = self.MESSAGES['parse_formats'] else: self.formats = ['%Y-%m-%d'] self.conversion_errmsg = self.MESSAGES['parse'] self.serialized_format = self.SERIALIZED_FORMAT super(DateType, self).__init__(**kwargs) def _mock(self, context=None): return datetime.date( year=random.randrange(600) + 1900, month=random.randrange(12) + 1, day=random.randrange(28) + 1, ) def to_native(self, value, context=None): if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value for fmt in self.formats: try: return datetime.datetime.strptime(value, fmt).date() except (ValueError, TypeError): continue else: raise ConversionError(self.conversion_errmsg.format(value, ", ".join(self.formats))) def to_primitive(self, value, context=None): return value.strftime(self.serialized_format) class DateTimeType(BaseType): """A field that holds a combined date and time value. The built-in parser accepts input values conforming to the ISO 8601 format ``<YYYY>-<MM>-<DD>T<hh>:<mm>[:<ss.ssssss>][<z>]``. A space may be substituted for the delimiter ``T``. 
The time zone designator ``<z>`` may be either ``Z`` or ``±<hh>[:][<mm>]``. Values are stored as standard ``datetime.datetime`` instances with the time zone offset in the ``tzinfo`` component if available. Raw values that do not specify a time zone will be converted to naive ``datetime`` objects unless ``tzd='utc'`` is in effect. Unix timestamps are also valid input values and will be converted to UTC datetimes. :param formats: (Optional) A value or iterable of values suitable as ``datetime.datetime.strptime`` format strings, for example ``('%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f')``. If the parameter is present, ``strptime()`` will be used for parsing instead of the built-in parser. :param serialized_format: The output format suitable for Python ``strftime``. Default: ``'%Y-%m-%dT%H:%M:%S.%f%z'`` :param parser: (Optional) An external function to use for parsing instead of the built-in parser. It should return a ``datetime.datetime`` instance. :param tzd: Sets the time zone policy. Default: ``'allow'`` ============== ====================================================================== ``'require'`` Values must specify a time zone. ``'allow'`` Values both with and without a time zone designator are allowed. ``'utc'`` Like ``allow``, but values with no time zone information are assumed to be in UTC. ``'reject'`` Values must not specify a time zone. This also prohibits timestamps. ============== ====================================================================== :param convert_tz: Indicates whether values with a time zone designator should be automatically converted to UTC. Default: ``False`` * ``True``: Convert the datetime to UTC based on its time zone offset. * ``False``: Don't convert. Keep the original time and offset intact. :param drop_tzinfo: Can be set to automatically remove the ``tzinfo`` objects. This option should generally be used in conjunction with the ``convert_tz`` option unless you only care about local wall clock times. 
Default: ``False`` * ``True``: Discard the ``tzinfo`` components and make naive ``datetime`` objects instead. * ``False``: Preserve the ``tzinfo`` components if present. """ primitive_type = str native_type = datetime.datetime SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%f%z' MESSAGES = { 'parse': _('Could not parse {0}. Should be ISO 8601 or timestamp.'), 'parse_formats': _('Could not parse {0}. Valid formats: {1}'), 'parse_external': _('Could not parse {0}.'), 'parse_tzd_require': _('Could not parse {0}. Time zone offset required.'), 'parse_tzd_reject': _('Could not parse {0}. Time zone offset not allowed.'), 'tzd_require': _('Could not convert {0}. Time zone required but not found.'), 'tzd_reject': _('Could not convert {0}. Time zone offsets not allowed.'), 'validate_tzd_require': _('Time zone information required but not found.'), 'validate_tzd_reject': _('Time zone information not allowed.'), 'validate_utc_none': _('Time zone must be UTC but was None.'), 'validate_utc_wrong': _('Time zone must be UTC.'), } REGEX = re.compile(r""" (?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)(?:T|\ ) (?P<hour>\d\d):(?P<minute>\d\d) (?::(?P<second>\d\d)(?:(?:\.|,)(?P<sec_frac>\d{1,6}))?)? (?:(?P<tzd_offset>(?P<tzd_sign>[+−-])(?P<tzd_hour>\d\d):?(?P<tzd_minute>\d\d)?) 
|(?P<tzd_utc>Z))?$""", re.X) TIMEDELTA_ZERO = datetime.timedelta(0) class fixed_timezone(datetime.tzinfo): def utcoffset(self, dt): return self.offset def fromutc(self, dt): return dt + self.offset def dst(self, dt): return None def tzname(self, dt): return self.str def __str__(self): return self.str def __repr__(self, info=''): return '{0}({1})'.format(type(self).__name__, info) class utc_timezone(fixed_timezone): offset = datetime.timedelta(0) name = str = 'UTC' class offset_timezone(fixed_timezone): def __init__(self, hours=0, minutes=0): self.offset = datetime.timedelta(hours=hours, minutes=minutes) total_seconds = self.offset.days * 86400 + self.offset.seconds self.str = '{0:s}{1:02d}:{2:02d}'.format( '+' if total_seconds >= 0 else '-', int(abs(total_seconds) / 3600), int(abs(total_seconds) % 3600 / 60)) def __repr__(self): return DateTimeType.fixed_timezone.__repr__(self, self.str) UTC = utc_timezone() EPOCH = datetime.datetime(1970, 1, 1, tzinfo=UTC) def __init__(self, formats=None, serialized_format=None, parser=None, tzd='allow', convert_tz=False, drop_tzinfo=False, **kwargs): # type: (...) 
-> datetime.datetime if tzd not in ('require', 'allow', 'utc', 'reject'): raise ValueError("DateTimeType.__init__() got an invalid value for parameter 'tzd'") self.formats = listify(formats) self.serialized_format = serialized_format or self.SERIALIZED_FORMAT self.parser = parser self.tzd = tzd self.convert_tz = convert_tz self.drop_tzinfo = drop_tzinfo super(DateTimeType, self).__init__(**kwargs) def _mock(self, context=None): dt = datetime.datetime( year=random.randrange(600) + 1900, month=random.randrange(12) + 1, day=random.randrange(28) + 1, hour=random.randrange(24), minute=random.randrange(60), second=random.randrange(60), microsecond=random.randrange(1000000)) if self.tzd == 'reject' or \ self.drop_tzinfo or \ self.tzd == 'allow' and random.randrange(2): return dt elif self.convert_tz: return dt.replace(tzinfo=self.UTC) else: return dt.replace(tzinfo=self.offset_timezone(hours=random.randrange(-12, 15), minutes=random.choice([0, 30, 45]))) def to_native(self, value, context=None): if isinstance(value, datetime.datetime): if value.tzinfo is None: if not self.drop_tzinfo: if self.tzd == 'require': raise ConversionError(self.messages['tzd_require'].format(value)) if self.tzd == 'utc': value = value.replace(tzinfo=self.UTC) else: if self.tzd == 'reject': raise ConversionError(self.messages['tzd_reject'].format(value)) if self.convert_tz: value = value.astimezone(self.UTC) if self.drop_tzinfo: value = value.replace(tzinfo=None) return value if self.formats: # Delegate to datetime.datetime.strptime() using provided format strings. for fmt in self.formats: try: dt = datetime.datetime.strptime(value, fmt) break except (ValueError, TypeError): continue else: raise ConversionError(self.messages['parse_formats'].format(value, ", ".join(self.formats))) elif self.parser: # Delegate to external parser. try: dt = self.parser(value) except: raise ConversionError(self.messages['parse_external'].format(value)) else: # Use built-in parser. 
try: value = float(value) except ValueError: dt = self.from_string(value) except TypeError: raise ConversionError(self.messages['parse'].format(value)) else: dt = self.from_timestamp(value) if not dt: raise ConversionError(self.messages['parse'].format(value)) if dt.tzinfo is None: if self.tzd == 'require': raise ConversionError(self.messages['parse_tzd_require'].format(value)) if self.tzd == 'utc' and not self.drop_tzinfo: dt = dt.replace(tzinfo=self.UTC) else: if self.tzd == 'reject': raise ConversionError(self.messages['parse_tzd_reject'].format(value)) if self.convert_tz: dt = dt.astimezone(self.UTC) if self.drop_tzinfo: dt = dt.replace(tzinfo=None) return dt def from_string(self, value): match = self.REGEX.match(value) if not match: return None parts = dict(((k, v) for k, v in match.groupdict().items() if v is not None)) p = lambda name: int(parts.get(name, 0)) microsecond = p('sec_frac') and p('sec_frac') * 10 ** (6 - len(parts['sec_frac'])) if 'tzd_utc' in parts: tz = self.UTC elif 'tzd_offset' in parts: tz_sign = 1 if parts['tzd_sign'] == '+' else -1 tz_offset = (p('tzd_hour') * 60 + p('tzd_minute')) * tz_sign if tz_offset == 0: tz = self.UTC else: tz = self.offset_timezone(minutes=tz_offset) else: tz = None try: return datetime.datetime(p('year'), p('month'), p('day'), p('hour'), p('minute'), p('second'), microsecond, tz) except (ValueError, TypeError): return None def from_timestamp(self, value): try: return datetime.datetime(1970, 1, 1, tzinfo=self.UTC) + datetime.timedelta(seconds=value) except (ValueError, TypeError): return None def to_primitive(self, value, context=None): if callable(self.serialized_format): return self.serialized_format(value) return value.strftime(self.serialized_format) def validate_tz(self, value, context=None): if value.tzinfo is None: if not self.drop_tzinfo: if self.tzd == 'require': raise ValidationError(self.messages['validate_tzd_require']) if self.tzd == 'utc': raise ValidationError(self.messages['validate_utc_none']) 
else: if self.drop_tzinfo: raise ValidationError(self.messages['validate_tzd_reject']) if self.tzd == 'reject': raise ValidationError(self.messages['validate_tzd_reject']) if self.convert_tz \ and value.tzinfo.utcoffset(value) != self.TIMEDELTA_ZERO: raise ValidationError(self.messages['validate_utc_wrong']) class UTCDateTimeType(DateTimeType): """A variant of ``DateTimeType`` that normalizes everything to UTC and stores values as naive ``datetime`` instances. By default sets ``tzd='utc'``, ``convert_tz=True``, and ``drop_tzinfo=True``. The standard export format always includes the UTC time zone designator ``"Z"``. """ SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' def __init__(self, formats=None, parser=None, tzd='utc', convert_tz=True, drop_tzinfo=True, **kwargs): # type: (...) -> datetime.datetime super(UTCDateTimeType, self).__init__(formats=formats, parser=parser, tzd=tzd, convert_tz=convert_tz, drop_tzinfo=drop_tzinfo, **kwargs) class TimestampType(DateTimeType): """A variant of ``DateTimeType`` that exports itself as a Unix timestamp instead of an ISO 8601 string. Always sets ``tzd='require'`` and ``convert_tz=True``. """ primitive_type = float def __init__(self, formats=None, parser=None, drop_tzinfo=False, **kwargs): # type: (...) -> datetime.datetime super(TimestampType, self).__init__(formats=formats, parser=parser, tzd='require', convert_tz=True, drop_tzinfo=drop_tzinfo, **kwargs) def to_primitive(self, value, context=None): if value.tzinfo is None: value = value.replace(tzinfo=self.UTC) else: value = value.astimezone(self.UTC) delta = value - self.EPOCH return delta.total_seconds() class TimedeltaType(BaseType): """Converts Python Timedelta objects into the corresponding value in seconds. 
""" primitive_type = float native_type = datetime.timedelta MESSAGES = { 'convert': _("Couldn't interpret '{0}' value as Timedelta."), } DAYS = 'days' SECONDS = 'seconds' MICROSECONDS = 'microseconds' MILLISECONDS = 'milliseconds' MINUTES = 'minutes' HOURS = 'hours' WEEKS = 'weeks' def __init__(self, precision='seconds', **kwargs): # type: (...) -> datetime.timedelta precision = precision.lower() units = (self.DAYS, self.SECONDS, self.MICROSECONDS, self.MILLISECONDS, self.MINUTES, self.HOURS, self.WEEKS) if precision not in units: raise ValueError("TimedeltaType.__init__() got an invalid value for parameter 'precision'") self.precision = precision super(TimedeltaType, self).__init__(**kwargs) def _mock(self, context=None): return datetime.timedelta(seconds=random.random() * 1000) def to_native(self, value, context=None): if isinstance(value, datetime.timedelta): return value try: return datetime.timedelta(**{self.precision: float(value)}) except (ValueError, TypeError): raise ConversionError(self.messages['convert'].format(value)) def to_primitive(self, value, context=None): base_unit = datetime.timedelta(**{self.precision: 1}) return int(value.total_seconds() / base_unit.total_seconds()) class GeoPointType(BaseType): """A list storing a latitude and longitude. 
""" primitive_type = list native_type = list MESSAGES = { 'point_min': _("{0} value {1} should be greater than or equal to {2}."), 'point_max': _("{0} value {1} should be less than or equal to {2}."), } def _mock(self, context=None): return (random.randrange(-90, 90), random.randrange(-180, 180)) @classmethod def _normalize(cls, value): if isinstance(value, dict): # py3: ensure list and not view return list(value.values()) else: return list(value) def to_native(self, value, context=None): """Make sure that a geo-value is of type (x, y) """ if not isinstance(value, (tuple, list, dict)): raise ConversionError(_('GeoPointType can only accept tuples, lists, or dicts')) elements = self._normalize(value) if not len(elements) == 2: raise ConversionError(_('Value must be a two-dimensional point')) if not all(isinstance(v, (float, int)) for v in elements): raise ConversionError(_('Both values in point must be float or int')) return value def validate_range(self, value, context=None): latitude, longitude = self._normalize(value) if latitude < -90: raise ValidationError( self.messages['point_min'].format('Latitude', latitude, '-90') ) if latitude > 90: raise ValidationError( self.messages['point_max'].format('Latitude', latitude, '90') ) if longitude < -180: raise ValidationError( self.messages['point_min'].format('Longitude', longitude, -180) ) if longitude > 180: raise ValidationError( self.messages['point_max'].format('Longitude', longitude, 180) ) class MultilingualStringType(BaseType): """ A multilanguage string field, stored as a dict with {'locale': 'localized_value'}. Minimum and maximum lengths apply to each of the localized values. At least one of ``default_locale`` or ``context.app_data['locale']`` must be defined when calling ``.to_primitive``. 
""" primitive_type = str native_type = str allow_casts = (int, bytes) MESSAGES = { 'convert': _("Couldn't interpret value as string."), 'max_length': _("String value in locale {0} is too long."), 'min_length': _("String value in locale {0} is too short."), 'locale_not_found': _("No requested locale was available."), 'no_locale': _("No default or explicit locales were given."), 'regex_locale': _("Name of locale {0} did not match validation regex."), 'regex_localized': _("String value in locale {0} did not match validation regex."), } LOCALE_REGEX = r'^[a-z]{2}(:?_[A-Z]{2})?$' def __init__(self, regex=None, max_length=None, min_length=None, default_locale=None, locale_regex=LOCALE_REGEX, **kwargs): self.regex = re.compile(regex) if regex else None self.max_length = max_length self.min_length = min_length self.default_locale = default_locale self.locale_regex = re.compile(locale_regex) if locale_regex else None super(MultilingualStringType, self).__init__(**kwargs) def _mock(self, context=None): return random_string(get_value_in(self.min_length, self.max_length)) def to_native(self, value, context=None): """Make sure a MultilingualStringType value is a dict or None.""" if not (value is None or isinstance(value, dict)): raise ConversionError(_('Value must be a dict or None')) return value def to_primitive(self, value, context=None): """ Use a combination of ``default_locale`` and ``context.app_data['locale']`` to return the best localized string. 
""" if value is None: return None context_locale = None if context and 'locale' in context.app_data: context_locale = context.app_data['locale'] # Build a list of all possible locales to try possible_locales = [] for locale in (context_locale, self.default_locale): if not locale: continue if isinstance(locale, string_type): possible_locales.append(locale) else: possible_locales.extend(locale) if not possible_locales: raise ConversionError(self.messages['no_locale']) for locale in possible_locales: if locale in value: localized = value[locale] break else: raise ConversionError(self.messages['locale_not_found']) if not isinstance(localized, str): if isinstance(localized, self.allow_casts): if isinstance(localized, bytes): localized = str(localized, 'utf-8') else: localized = str(localized) else: raise ConversionError(self.messages['convert']) return localized def validate_length(self, value, context=None): for locale, localized in value.items(): len_of_value = len(localized) if localized else 0 if self.max_length is not None and len_of_value > self.max_length: raise ValidationError(self.messages['max_length'].format(locale)) if self.min_length is not None and len_of_value < self.min_length: raise ValidationError(self.messages['min_length'].format(locale)) def validate_regex(self, value, context=None): if self.regex is None and self.locale_regex is None: return for locale, localized in value.items(): if self.regex is not None and self.regex.match(localized) is None: raise ValidationError( self.messages['regex_localized'].format(locale)) if self.locale_regex is not None and self.locale_regex.match(locale) is None: raise ValidationError( self.messages['regex_locale'].format(locale)) if PY2: # Python 2 names cannot be unicode __all__ = [n.encode('ascii') for n in __all__]
/schematics-fork-2.1.1.tar.gz/schematics-fork-2.1.1/schematics/types/base.py
0.789761
0.175644
base.py
pypi
from typing import Type

from schematics.common import NOT_NONE
from schematics.exceptions import ConversionError
from schematics.types import BaseType
from schematics.undefined import Undefined

from schematics_proto3.enum import ProtobufEnum
from schematics_proto3.types.base import ProtobufTypeMixin
from schematics_proto3.unset import Unset

__all__ = ['EnumType']


class EnumType(ProtobufTypeMixin, BaseType):
    """Schematics field wrapping a :class:`ProtobufEnum`.

    Accepts either a variant name (``str``) or a variant value (``int``)
    and normalises it to the enum member.  ``unset_variant`` lets one enum
    member stand in for "field not set" (protobuf 3 scalar enums carry no
    presence information), mapping it to ``Unset``.
    """

    def __init__(self, enum_class: Type[ProtobufEnum], *, unset_variant=Unset,
                 **kwargs):
        super().__init__(**kwargs)

        self.enum_class: Type[ProtobufEnum] = enum_class
        self.unset_variant = unset_variant

    def check_required(self: BaseType, value, context):
        # Treat Unset (and the configured unset variant) as a violation of
        # the `required` rule.
        if self.required and value in {Unset, self.unset_variant}:
            raise ConversionError(self.messages['required'])

        super().check_required(value, context)

    def convert(self, value, context):
        """Convert a variant name or number into an enum member.

        Returns ``Unset`` when the value denotes an unset field.
        """
        if value in {Unset, self.unset_variant}:
            return Unset

        if isinstance(value, str):
            # Lookup by member name; KeyError propagates for unknown names.
            return self.enum_class[value]
        if isinstance(value, int):
            # Lookup by member value; ValueError propagates for unknown values.
            return self.enum_class(value)

        # TODO: Raise ConversionError instead; AttributeError is kept here
        #       only for backwards compatibility with existing callers.
        raise AttributeError(f'Expected int or str, got {type(value)}')

    def export(self, value, format, context):  # pylint:disable=redefined-builtin
        """Export an enum member by name, honouring the export level for Unset."""
        if value is Unset:
            export_level = self.get_export_level(context)

            if export_level <= NOT_NONE:
                return Undefined

            return Unset

        return value.name

    def convert_protobuf(self, msg, field_name, field_names):  # pylint:disable=unused-argument
        """Read the raw enum value for ``field_name`` from a protobuf message."""
        # TODO: Catch AttributeError and raise proper exception.
        value = getattr(msg, field_name)

        if value in {Unset, self.unset_variant}:
            return Unset

        return value

    def export_protobuf(self, msg, field_name, value):
        """Write ``value`` (an enum member) into ``msg.field_name``."""
        # TODO: Check that model_class is an instance of Model
        # Fixed: the guard used to test `field_name is Unset`, but
        # `field_name` is always a string -- it is the *value* that may be
        # Unset, and `value.value` below would fail on the sentinel.
        if value in {Unset, self.unset_variant}:
            return

        setattr(
            msg,
            field_name,
            value.value,
        )
/schematics_proto3-0.1.3-py3-none-any.whl/schematics_proto3/types/enum.py
0.692538
0.225918
enum.py
pypi
from schematics.common import NOT_NONE
from schematics.exceptions import ValidationError, DataError, CompoundError, StopValidationError
from schematics.types import CompoundType, BaseType
from schematics.undefined import Undefined

from schematics_proto3.oneof import OneOfVariant
from schematics_proto3.types.base import ProtobufTypeMixin
from schematics_proto3.unset import Unset
from schematics_proto3.utils import get_value_fallback, set_value_fallback

__all__ = ['OneOfType']


class OneOfType(ProtobufTypeMixin, CompoundType):
    """Schematics field mapping a protobuf ``oneof`` group.

    ``variants_spec`` maps variant names to schematics field types.  At
    most one variant carries a value at a time; values are wrapped in
    :class:`OneOfVariant` instances (variant name + converted value).
    """

    def __init__(self, variants_spec, *args, **kwargs):
        # TODO: Check that each:
        #       1) key in variants_spec exists in protobuf message
        #          (with respect to renaming)
        #       2) value in variants_spec is a subclass of BaseType
        super().__init__(*args, **kwargs)

        self.variants_spec = variants_spec
        self._variant = None
        self._variant_type = None
        self._protobuf_renames = {}
        self._default = Unset

        # Collect `protobuf_field` renames declared in field metadata.
        for name, spec in variants_spec.items():
            pb_name = spec.metadata.get('protobuf_field', None)

            if pb_name is not None:
                # NOTE(review): this only guards a rename colliding with a
                # declared variant name; duplicates among the renames
                # themselves are not detected -- confirm intent.
                if pb_name in variants_spec:
                    raise RuntimeError(f'Duplicated variant name `{pb_name}`')

                self._protobuf_renames[pb_name] = name

    @property
    def variant(self):
        # Name of the variant currently carrying a value (None when unset).
        return self._variant

    @variant.setter
    def variant(self, name):
        # Accept either a declared variant name or its protobuf rename.
        if name in self.variants_spec:
            self._variant = name
            self._variant_type = self.variants_spec[name]
        elif name in self._protobuf_renames:
            self._variant = self._protobuf_renames[name]
            self._variant_type = self.variants_spec[self._variant]
        else:
            raise KeyError(name)

    @property
    def variant_type(self):
        # Field type of the currently selected variant.
        return self._variant_type

    def pre_setattr(self, value):
        """Normalise an assigned value into an :class:`OneOfVariant`.

        Accepts an OneOfVariant, a ``(variant, value)`` tuple or a dict
        with ``variant`` and ``value`` keys, and records the selected
        variant on this field.
        """
        # TODO: Raise proper exceptions
        variant = None

        if isinstance(value, OneOfVariant):
            variant = value

        if isinstance(value, tuple):
            if len(value) != 2:
                raise RuntimeError(
                    f'OneOfVariant tuple must have 2 items, got {len(value)}'
                )
            variant = OneOfVariant(value[0], value[1])

        if isinstance(value, dict):
            if 'variant' not in value or 'value' not in value:
                # Fixed: this literal previously contained a hard line break
                # (an extraction artefact) which broke the string.
                raise RuntimeError(
                    'OneOfVariant dict must have `variant` and `value` keys.'
                )
            variant = OneOfVariant(value['variant'], value['value'])

        if variant is None:
            raise RuntimeError('Unknown value')

        self.variant = variant.variant

        return variant

    def convert(self, value, context):
        """Convert the raw value with the selected variant's field type."""
        # TODO: Raise proper exception (ConversionError)
        if value is Unset:
            return Unset

        if self.variant is None:
            raise RuntimeError('Variant is unset')

        val = self.variant_type.convert(value, context)

        return OneOfVariant(self.variant, val)

    def validate(self: BaseType, value, context=None):
        """Validate the inner variant value, then this field's own validators."""
        if value is Unset:
            return Unset

        # Run validation of the inner variant field.
        try:
            self.variant_type.validate(value.value, context)
        except (ValidationError, DataError) as ex:
            raise CompoundError({
                self.variant: ex,
            })

        # Run validation for this field itself.
        # The following is essentially a copy of the code in BaseType :/
        errors = []
        for validator in self.validators:
            try:
                validator(value, context)
            except ValidationError as exc:
                errors.append(exc)
                if isinstance(exc, StopValidationError):
                    break
        if errors:
            raise ValidationError(errors)

        return value

    def export(self, value, format, context):  # pylint:disable=redefined-builtin
        """Export as ``{'variant': ..., 'value': ...}``, honouring export level."""
        if value in {Unset, None}:
            export_level = self.get_export_level(context)

            if export_level <= NOT_NONE:
                return Undefined

            return Unset

        return {
            'variant': value.variant,
            'value': self.variant_type.export(value.value, format, context),
        }

    # These methods are abstract in CompoundType; override them to silence
    # linters.  Raising NotImplementedError does not matter, as we already
    # override convert and export (without underscores), which are called
    # earlier.
    def _convert(self, value, context):
        raise NotImplementedError()

    def _export(self, value, format, context):  # pylint:disable=redefined-builtin
        raise NotImplementedError()

    def convert_protobuf(self, msg, field_name, field_names):
        """Read the active oneof variant from a protobuf message."""
        # TODO: Handle value error:
        #       ValueError: Protocol message has no oneof "X" field.
        variant_name = msg.WhichOneof(field_name)

        if variant_name is None:
            return Unset

        self.variant = variant_name
        # Fall back to plain attribute access for variant types that do not
        # implement convert_protobuf themselves.
        convert_func = getattr(self.variant_type, 'convert_protobuf', get_value_fallback)

        return convert_func(msg, variant_name, field_names)

    def export_protobuf(self, msg, field_name, value):  # pylint: disable=unused-argument
        """Write the wrapped variant value into the protobuf message."""
        # TODO: Check that model_class is an instance of Model
        if value in {Unset, None}:
            return

        # Fall back to plain attribute assignment for variant types that do
        # not implement export_protobuf themselves.
        set_value = getattr(self.variant_type, 'export_protobuf', set_value_fallback)

        set_value(msg, self.variant, value.value)
/schematics_proto3-0.1.3-py3-none-any.whl/schematics_proto3/types/oneof.py
0.602062
0.244036
oneof.py
pypi
this change log is not maintained in this fork 2.1.0 / Unreleased ================== **[BREAKING CHANGE]** - Drop Python 2.6 support `#517 <https://github.com/schematics/schematics/pull/517>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) Other changes: - Add TimedeltaType `#540 <https://github.com/schematics/schematics/pull/540>`__ (`gabisurita <https://github.com/gabisurita>`__) - Allow to create Model fields dynamically `#512 <https://github.com/schematics/schematics/pull/512>`__ (`lkraider <https://github.com/lkraider>`__) - Allow ModelOptions to have extra parameters `#449 <https://github.com/schematics/schematics/pull/449>`__ (`rmb938 <https://github.com/rmb938>`__) `#506 <https://github.com/schematics/schematics/pull/506>`__ (`ekampf <https://github.com/ekampf>`__) - Accept callables as serialize roles `#508 <https://github.com/schematics/schematics/pull/508>`__ (`lkraider <https://github.com/lkraider>`__) (`jaysonsantos <https://github.com/jaysonsantos>`__) - Simplify PolyModelType.find_model for readability `#537 <https://github.com/schematics/schematics/pull/537>`__ (`kstrauser <https://github.com/kstrauser>`__) - Enable PolyModelType recursive validation `#535 <https://github.com/schematics/schematics/pull/535>`__ (`javiertejero <https://github.com/javiertejero>`__) - Documentation fixes `#509 <https://github.com/schematics/schematics/pull/509>`__ (`Tuoris <https://github.com/Tuoris>`__) `#514 <https://github.com/schematics/schematics/pull/514>`__ (`tommyzli <https://github.com/tommyzli>`__) `#518 <https://github.com/schematics/schematics/pull/518>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#546 <https://github.com/schematics/schematics/pull/546>`__ (`harveyslash <https://github.com/harveyslash>`__) - Fix Model.init validation when partial is True `#531 <https://github.com/schematics/schematics/issues/531>`__ (`lkraider <https://github.com/lkraider>`__) - Minor number types refactor and mocking fixes `#519 
<https://github.com/schematics/schematics/pull/519>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#520 <https://github.com/schematics/schematics/pull/520>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) - Add ability to import models as strings `#496 <https://github.com/schematics/schematics/pull/496>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) - Add EnumType `#504 <https://github.com/schematics/schematics/pull/504>`__ (`ekamil <https://github.com/ekamil>`__) - Dynamic models: Possible memory issues because of _subclasses `#502 <https://github.com/schematics/schematics/pull/502>`__ (`mjrk <https://github.com/mjrk>`__) - Add type hints to constructors of field type classes `#488 <https://github.com/schematics/schematics/pull/488>`__ (`KonishchevDmitry <https://github.com/KonishchevDmitry>`__) - Regression: Do not call field validator if field has not been set `#499 <https://github.com/schematics/schematics/pull/499>`__ (`cmonfort <https://github.com/cmonfort>`__) - Add possibility to translate strings and add initial pt_BR translations `#495 <https://github.com/schematics/schematics/pull/495>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.1 / 2017-05-30 ================== - Support for raising DataError inside custom validate_fieldname methods. `#441 <https://github.com/schematics/schematics/pull/441>`__ (`alexhayes <https://github.com/alexhayes>`__) - Add specialized SchematicsDeprecationWarning. (`lkraider <https://github.com/lkraider>`__) - DateTimeType to_native method should handle type errors gracefully. `#491 <https://github.com/schematics/schematics/pull/491>`__ (`e271828- <https://github.com/e271828->`__) - Allow fields names to override the mapping-interface methods. 
`#489 <https://github.com/schematics/schematics/pull/489>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.0 / 2017-05-22 ================== **[BREAKING CHANGE]** Version 2.0 introduces many API changes, and it is not fully backwards-compatible with 1.x code. `Full Changelog <https://github.com/schematics/schematics/compare/v1.1.2...v2.0.0>`_ - Add syntax highlighting to README examples `#486 <https://github.com/schematics/schematics/pull/486>`__ (`gabisurita <https://github.com/gabisurita>`__) - Encode Unsafe data state in Model `#484 <https://github.com/schematics/schematics/pull/484>`__ (`lkraider <https://github.com/lkraider>`__) - Add MACAddressType `#482 <https://github.com/schematics/schematics/pull/482>`__ (`aleksej-paschenko <https://github.com/aleksej-paschenko>`__) 2.0.0.b1 / 2017-04-06 ===================== - Enhancing and addressing some issues around exceptions: `#477 <https://github.com/schematics/schematics/pull/477>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) - Allow primitive and native types to be inspected `#431 <https://github.com/schematics/schematics/pull/431>`__ (`chadrik <https://github.com/chadrik>`__) - Atoms iterator performance improvement `#476 <https://github.com/schematics/schematics/pull/476>`__ (`vovanbo <https://github.com/vovanbo>`__) - Fixes 453: Recursive import\_loop with ListType `#475 <https://github.com/schematics/schematics/pull/475>`__ (`lkraider <https://github.com/lkraider>`__) - Schema API `#466 <https://github.com/schematics/schematics/pull/466>`__ (`lkraider <https://github.com/lkraider>`__) - Tweak code example to avoid sql injection `#462 <https://github.com/schematics/schematics/pull/462>`__ (`Ian-Foote <https://github.com/Ian-Foote>`__) - Convert readthedocs links for their .org -> .io migration for hosted projects `#454 <https://github.com/schematics/schematics/pull/454>`__ (`adamchainz <https://github.com/adamchainz>`__) - Support all 
non-string Iterables as choices (dev branch) `#436 <https://github.com/schematics/schematics/pull/436>`__ (`di <https://github.com/di>`__) - When testing if a values is None or Undefined, use 'is'. `#425 <https://github.com/schematics/schematics/pull/425>`__ (`chadrik <https://github.com/chadrik>`__) 2.0.0a1 / 2016-05-03 ==================== - Restore v1 to\_native behavior; simplify converter code `#412 <https://github.com/schematics/schematics/pull/412>`__ (`bintoro <https://github.com/bintoro>`__) - Change conversion rules for booleans `#407 <https://github.com/schematics/schematics/pull/407>`__ (`bintoro <https://github.com/bintoro>`__) - Test for Model.\_\_init\_\_ context passing to types `#399 <https://github.com/schematics/schematics/pull/399>`__ (`sheilatron <https://github.com/sheilatron>`__) - Code normalization for Python 3 + general cleanup `#391 <https://github.com/schematics/schematics/pull/391>`__ (`bintoro <https://github.com/bintoro>`__) - Add support for arbitrary field metadata. 
`#390 <https://github.com/schematics/schematics/pull/390>`__ (`chadrik <https://github.com/chadrik>`__) - Introduce MixedType `#380 <https://github.com/schematics/schematics/pull/380>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev2 / 2016-02-06 ======================= - Type maintenance `#383 <https://github.com/schematics/schematics/pull/383>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev1 / 2016-02-01 ======================= - Performance optimizations `#378 <https://github.com/schematics/schematics/pull/378>`__ (`bintoro <https://github.com/bintoro>`__) - Validation refactoring + exception redesign `#374 <https://github.com/schematics/schematics/pull/374>`__ (`bintoro <https://github.com/bintoro>`__) - Fix typo: serilaizataion --> serialization `#373 <https://github.com/schematics/schematics/pull/373>`__ (`jeffwidman <https://github.com/jeffwidman>`__) - Add support for undefined values `#372 <https://github.com/schematics/schematics/pull/372>`__ (`bintoro <https://github.com/bintoro>`__) - Serializable improvements `#371 <https://github.com/schematics/schematics/pull/371>`__ (`bintoro <https://github.com/bintoro>`__) - Unify import/export interface across all types `#368 <https://github.com/schematics/schematics/pull/368>`__ (`bintoro <https://github.com/bintoro>`__) - Correctly decode bytestrings in Python 3 `#365 <https://github.com/schematics/schematics/pull/365>`__ (`bintoro <https://github.com/bintoro>`__) - Fix NumberType.to\_native() `#364 <https://github.com/schematics/schematics/pull/364>`__ (`bintoro <https://github.com/bintoro>`__) - Make sure field.validate() uses a native type `#363 <https://github.com/schematics/schematics/pull/363>`__ (`bintoro <https://github.com/bintoro>`__) - Don't validate ListType items twice `#362 <https://github.com/schematics/schematics/pull/362>`__ (`bintoro <https://github.com/bintoro>`__) - Collect field validators as bound methods `#361 <https://github.com/schematics/schematics/pull/361>`__ (`bintoro 
<https://github.com/bintoro>`__) - Propagate environment during recursive import/export/validation `#359 <https://github.com/schematics/schematics/pull/359>`__ (`bintoro <https://github.com/bintoro>`__) - DateTimeType & TimestampType major rewrite `#358 <https://github.com/schematics/schematics/pull/358>`__ (`bintoro <https://github.com/bintoro>`__) - Always export empty compound objects as {} / [] `#351 <https://github.com/schematics/schematics/pull/351>`__ (`bintoro <https://github.com/bintoro>`__) - export\_loop cleanup `#350 <https://github.com/schematics/schematics/pull/350>`__ (`bintoro <https://github.com/bintoro>`__) - Fix FieldDescriptor.\_\_delete\_\_ to not touch model `#349 <https://github.com/schematics/schematics/pull/349>`__ (`bintoro <https://github.com/bintoro>`__) - Add validation method for latitude and longitude ranges in GeoPointType `#347 <https://github.com/schematics/schematics/pull/347>`__ (`wraziens <https://github.com/wraziens>`__) - Fix longitude values for GeoPointType mock and add tests `#344 <https://github.com/schematics/schematics/pull/344>`__ (`wraziens <https://github.com/wraziens>`__) - Add support for self-referential ModelType fields `#335 <https://github.com/schematics/schematics/pull/335>`__ (`bintoro <https://github.com/bintoro>`__) - avoid unnecessary code path through try/except `#327 <https://github.com/schematics/schematics/pull/327>`__ (`scavpy <https://github.com/scavpy>`__) - Get mock object for ModelType and ListType `#306 <https://github.com/schematics/schematics/pull/306>`__ (`kaiix <https://github.com/kaiix>`__) 1.1.3 / 2017-06-27 ================== * [Maintenance] (`#501 <https://github.com/schematics/schematics/issues/501>`_) Dynamic models: Possible memory issues because of _subclasses 1.1.2 / 2017-03-27 ================== * [Bug] (`#478 <https://github.com/schematics/schematics/pull/478>`_) Fix dangerous performance issue with ModelConversionError in nested models 1.1.1 / 2015-11-03 ================== * [Bug] 
(`befa202 <https://github.com/schematics/schematics/commit/befa202c3b3202aca89fb7ef985bdca06f9da37c>`_) Fix Unicode issue with DecimalType * [Documentation] (`41157a1 <https://github.com/schematics/schematics/commit/41157a13896bd32a337c5503c04c5e9cc30ba4c7>`_) Documentation overhaul * [Bug] (`860d717 <https://github.com/schematics/schematics/commit/860d71778421981f284c0612aec665ebf0cfcba2>`_) Fix import that was negatively affecting performance * [Feature] (`93b554f <https://github.com/schematics/schematics/commit/93b554fd6a4e7b38133c4da5592b1843101792f0>`_) Add DataObject to datastructures.py * [Bug] (`#236 <https://github.com/schematics/schematics/pull/236>`_) Set `None` on a field that's a compound type should honour that semantics * [Maintenance] (`#348 <https://github.com/schematics/schematics/pull/348>`_) Update requirements * [Maintenance] (`#346 <https://github.com/schematics/schematics/pull/346>`_) Combining Requirements * [Maintenance] (`#342 <https://github.com/schematics/schematics/pull/342>`_) Remove to_primitive() method from compound types * [Bug] (`#339 <https://github.com/schematics/schematics/pull/339>`_) Basic number validation * [Bug] (`#336 <https://github.com/schematics/schematics/pull/336>`_) Don't evaluate serializable when accessed through class * [Bug] (`#321 <https://github.com/schematics/schematics/pull/321>`_) Do not compile regex * [Maintenance] (`#319 <https://github.com/schematics/schematics/pull/319>`_) Remove mock from install_requires 1.1.0 / 2015-07-12 ================== * [Feature] (`#303 <https://github.com/schematics/schematics/pull/303>`_) fix ListType, validate_items adds to errors list just field name without... 
* [Feature] (`#304 <https://github.com/schematics/schematics/pull/304>`_) Include Partial Data when Raising ModelConversionError * [Feature] (`#305 <https://github.com/schematics/schematics/pull/305>`_) Updated domain verifications to fit to RFC/working standards * [Feature] (`#308 <https://github.com/schematics/schematics/pull/308>`_) Grennady ordered validation * [Feature] (`#309 <https://github.com/schematics/schematics/pull/309>`_) improves date_time_type error message for custom formats * [Feature] (`#310 <https://github.com/schematics/schematics/pull/310>`_) accept optional 'Z' suffix for UTC date_time_type format * [Feature] (`#311 <https://github.com/schematics/schematics/pull/311>`_) Remove commented lines from models.py * [Feature] (`#230 <https://github.com/schematics/schematics/pull/230>`_) Message normalization 1.0.4 / 2015-04-13 ================== * [Example] (`#286 <https://github.com/schematics/schematics/pull/286>`_) Add schematics usage with Django * [Feature] (`#292 <https://github.com/schematics/schematics/pull/292>`_) increase domain length to 10 for .holiday, .vacations * [Feature] (`#297 <https://github.com/schematics/schematics/pull/297>`_) Support for fields order in serialized format * [Feature] (`#300 <https://github.com/schematics/schematics/pull/300>`_) increase domain length to 32 1.0.3 / 2015-03-07 ================== * [Feature] (`#284 <https://github.com/schematics/schematics/pull/284>`_) Add missing requirement for `six` * [Feature] (`#283 <https://github.com/schematics/schematics/pull/283>`_) Update error msgs to print out invalid values in base.py * [Feature] (`#281 <https://github.com/schematics/schematics/pull/281>`_) Update Model.__eq__ * [Feature] (`#267 <https://github.com/schematics/schematics/pull/267>`_) Type choices should be list or tuple 1.0.2 / 2015-02-12 ================== * [Bug] (`#280 <https://github.com/schematics/schematics/issues/280>`_) Fix the circular import issue. 
1.0.1 / 2015-02-01 ================== * [Feature] (`#184 <https://github.com/schematics/schematics/issues/184>`_ / `03b2fd9 <https://github.com/schematics/schematics/commit/03b2fd97fb47c00e8d667cc8ea7254cc64d0f0a0>`_) Support for polymorphic model fields * [Bug] (`#233 <https://github.com/schematics/schematics/pull/233>`_) Set field.owner_model recursively and honor ListType.field.serialize_when_none * [Bug](`#252 <https://github.com/schematics/schematics/pull/252>`_) Fixed project URL * [Feature] (`#259 <https://github.com/schematics/schematics/pull/259>`_) Give export loop to serializable when type has one * [Feature] (`#262 <https://github.com/schematics/schematics/pull/262>`_) Make copies of inherited meta attributes when setting up a Model * [Documentation] (`#276 <https://github.com/schematics/schematics/pull/276>`_) Improve the documentation of get_mock_object 1.0.0 / 2014-10-16 ================== * [Documentation] (`#239 <https://github.com/schematics/schematics/issues/239>`_) Fix typo with wording suggestion * [Documentation] (`#244 <https://github.com/schematics/schematics/issues/244>`_) fix wrong reference in docs * [Documentation] (`#246 <https://github.com/schematics/schematics/issues/246>`_) Using the correct function name in the docstring * [Documentation] (`#245 <https://github.com/schematics/schematics/issues/245>`_) Making the docstring match actual parameter names * [Feature] (`#241 <https://github.com/schematics/schematics/issues/241>`_) Py3k support 0.9.5 / 2014-07-19 ================== * [Feature] (`#191 <https://github.com/schematics/schematics/pull/191>`_) Updated import_data to avoid overwriting existing data. deserialize_mapping can now support partial and nested models. 
* [Documentation] (`#192 <https://github.com/schematics/schematics/pull/192>`_) Document the creation of custom types * [Feature] (`#193 <https://github.com/schematics/schematics/pull/193>`_) Add primitive types accepting values of any simple or compound primitive JSON type. * [Bug] (`#194 <https://github.com/schematics/schematics/pull/194>`_) Change standard coerce_key function to unicode * [Tests] (`#196 <https://github.com/schematics/schematics/pull/196>`_) Test fixes and cleanup * [Feature] (`#197 <https://github.com/schematics/schematics/pull/197>`_) Giving context to serialization * [Bug] (`#198 <https://github.com/schematics/schematics/pull/198>`_) Fixed typo in variable name in DateTimeType * [Feature] (`#200 <https://github.com/schematics/schematics/pull/200>`_) Added the option to turn of strict conversion when creating a Model from a dict * [Feature] (`#212 <https://github.com/schematics/schematics/pull/212>`_) Support exporting ModelType fields with subclassed model instances * [Feature] (`#214 <https://github.com/schematics/schematics/pull/214>`_) Create mock objects using a class's fields as a template * [Bug] (`#215 <https://github.com/schematics/schematics/pull/215>`_) PEP 8 FTW * [Feature] (`#216 <https://github.com/schematics/schematics/pull/216>`_) Datastructures cleanup * [Feature] (`#217 <https://github.com/schematics/schematics/pull/217>`_) Models cleanup pt 1 * [Feature] (`#218 <https://github.com/schematics/schematics/pull/218>`_) Models cleanup pt 2 * [Feature] (`#219 <https://github.com/schematics/schematics/pull/219>`_) Mongo cleanup * [Feature] (`#220 <https://github.com/schematics/schematics/pull/220>`_) Temporal cleanup * [Feature] (`#221 <https://github.com/schematics/schematics/pull/221>`_) Base cleanup * [Feature] (`#224 <https://github.com/schematics/schematics/pull/224>`_) Exceptions cleanup * [Feature] (`#225 <https://github.com/schematics/schematics/pull/225>`_) Validate cleanup * [Feature] (`#226 
<https://github.com/schematics/schematics/pull/226>`_) Serializable cleanup * [Feature] (`#227 <https://github.com/schematics/schematics/pull/227>`_) Transforms cleanup * [Feature] (`#228 <https://github.com/schematics/schematics/pull/228>`_) Compound cleanup * [Feature] (`#229 <https://github.com/schematics/schematics/pull/229>`_) UUID cleanup * [Feature] (`#231 <https://github.com/schematics/schematics/pull/231>`_) Booleans as numbers 0.9.4 / 2013-12-08 ================== * [Feature] (`#178 <https://github.com/schematics/schematics/pull/178>`_) Added deserialize_from flag to BaseType for alternate field names on import * [Bug] (`#186 <https://github.com/schematics/schematics/pull/186>`_) Compoundtype support in ListTypes * [Bug] (`#181 <https://github.com/schematics/schematics/pull/181>`_) Removed that stupid print statement! * [Feature] (`#182 <https://github.com/schematics/schematics/pull/182>`_) Default roles system * [Documentation] (`#190 <https://github.com/schematics/schematics/pull/190>`_) Typos * [Bug] (`#177 <https://github.com/schematics/schematics/pull/177>`_) Removed `__iter__` from ModelMeta * [Documentation] (`#188 <https://github.com/schematics/schematics/pull/188>`_) Typos 0.9.3 / 2013-10-20 ================== * [Documentation] More improvements * [Feature] (`#147 <https://github.com/schematics/schematics/pull/147>`_) Complete conversion over to py.test * [Bug] (`#176 <https://github.com/schematics/schematics/pull/176>`_) Fixed bug preventing clean override of options class * [Bug] (`#174 <https://github.com/schematics/schematics/pull/174>`_) Python 2.6 support 0.9.2 / 2013-09-13 ================== * [Documentation] New History file! * [Documentation] Major improvements to documentation * [Feature] Renamed ``check_value`` to ``validate_range`` * [Feature] Changed ``serialize`` to ``to_native`` * [Bug] (`#155 <https://github.com/schematics/schematics/pull/155>`_) NumberType number range validation bugfix
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/HISTORY.rst
0.72331
0.706444
HISTORY.rst
pypi
.. _types:

=====
Types
=====

Types are the smallest definition of structure in Schematics. They represent
structure by offering functions to inspect or mutate the data in some way.

According to Schematics, a type is an instance of a way to do three things:

1. Coerce the data type into an appropriate representation in Python
2. Convert the Python representation into other formats suitable for
   serialization
3. Offer a precise method of validating data of many forms

These properties are implemented as ``to_native``, ``to_primitive``, and
``validate``.

Coercion
========

A simple example is the ``DateTimeType``.

::

  >>> from schematics.types import DateTimeType
  >>> dt_t = DateTimeType()

The ``to_native`` function transforms an ISO8601 formatted date string into a
Python ``datetime.datetime``.

::

  >>> dt = dt_t.to_native('2013-08-31T02:21:21.486072')
  >>> dt
  datetime.datetime(2013, 8, 31, 2, 21, 21, 486072)

Conversion
==========

The ``to_primitive`` function changes it back to a language agnostic form, in
this case an ISO8601 formatted string, just like we used above.

::

  >>> dt_t.to_primitive(dt)
  '2013-08-31T02:21:21.486072'

Validation
==========

Validation can be as simple as successfully calling ``to_native``, but
sometimes more is needed — for example, constraining the data or its behavior
during a typical use, like serialization.

Let's look at the ``StringType``. We'll set a ``max_length`` of 10.

::

  >>> from schematics.types import StringType
  >>> st = StringType(max_length=10)
  >>> st.to_native('this is longer than 10')
  u'this is longer than 10'

It converts to a string just fine. Now, let's attempt to validate it.

::

  >>> st.validate('this is longer than 10')
  Traceback (most recent call last):
    File "<stdin>", line 1, in <module>
    File "schematics/types/base.py", line 164, in validate
      raise ValidationError(errors)
  schematics.exceptions.ValidationError: [u'String value is too long.']

Custom types
============

If the types provided by the schematics library don't meet all of your needs,
you can also create new types.
Do so by extending ``schematics.types.BaseType``, and decide which based methods you need to override. `to_native` ~~~~~~~~~~~ By default, this method on ``schematics.types.BaseType`` just returns the primitive value it was given. Override this if you want to convert it to a specific native value. For example, suppose we are implementing a type that represents the net-location portion of a URL, which consists of a hostname and optional port number:: >>> from schematics.types import BaseType >>> class NetlocType(BaseType): ... def to_native(self, value): ... if ':' in value: ... return tuple(value.split(':', 1)) ... return (value, None) `to_primitive` ~~~~~~~~~~~~~~ By default, this method on ``schematics.types.BaseType`` just returns the native value it was given. Override this to convert any non-primitive values to primitive data values. The following types can pass through safely: * int * float * bool * basestring * NoneType * lists or dicts of any of the above or containing other similarly constrained lists or dicts To cover values that fall outside of these definitions, define a primitive conversion:: >>> from schematics.types import BaseType >>> class NetlocType(BaseType): ... def to_primitive(self, value): ... host, port = value ... if port: ... return u'{0}:{1}'.format(host, port) ... return host validation ~~~~~~~~~~ The base implementation of `validate` runs individual validators defined: * At type class definition time, as methods named in a specific way * At instantiation time as arguments to the type's init method. The second type is explained by ``schematics.types.BaseType``, so we'll focus on the first option. Declared validation methods take names of the form `validate_constraint(self, value)`, where `constraint` is an arbitrary name you give to the check being performed. 
If the check fails, then the method should raise ``schematics.exceptions.ValidationError``:: >>> from schematics.exceptions import ValidationError >>> from schematics.types import BaseType >>> class NetlocType(BaseType): ... def validate_netloc(self, value): ... if ':' not in value: ... raise ValidationError('Value must be a valid net location of the form host[:port]') However, schematics types do define an organized way to define and manage coded error messages. By defining a `MESSAGES` dict, you can assign error messages to your constraint name. Then the message is available as `self.message['my_constraint']` in validation methods. Sub-classes can add messages for new codes or replace messages for existing codes. However, they will inherit messages for error codes defined by base classes. So, to enhance the prior example:: >>> from schematics.exceptions import ValidationError >>> from schematics.types import BaseType >>> class NetlocType(BaseType): ... MESSAGES = { ... 'netloc': 'Value must be a valid net location of the form host[:port]' ... } ... def validate_netloc(self, value): ... if ':' not in value: ... raise ValidationError(self.messages['netloc']) Parameterizing types ~~~~~~~~~~~~~~~~~~~~ There may be times when you want to override `__init__` and parameterize your type. When you do so, just ensure two things: * Don't redefine any of the initialization parameters defined for ``schematics.types.BaseType``. * After defining your specific parameters, ensure that the base parameters are given to the base init method. The simplest way to ensure this is to accept `*args` and `**kwargs` and pass them through to the super init method, like so:: >>> from schematics.types import BaseType >>> class NetlocType(BaseType): ... def __init__(self, verify_location=False, *args, **kwargs): ... super().__init__(*args, **kwargs) ... 
self.verify_location = verify_location More Information ================ To learn more about **Types**, visit the :ref:`Types API <api_doc_types>`
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/docs/usage/types.rst
0.947817
0.699889
types.rst
pypi
.. _exporting: ========= Exporting ========= To export data is to go from the Schematics representation of data to some other form. It's also possible you want to adjust some things along the way, such as skipping over some fields or providing empty values for missing fields. The general mechanism for data export is to call a function on every field in the model. The function probably converts the field's value to some other format, but you can easily modify it. We'll use the following model for the examples: :: from schematics.models import Model from schematics.types import StringType, DateTimeType from schematics.transforms import blacklist class Movie(Model): name = StringType() director = StringType() release_date = DateTimeType() personal_thoughts = StringType() class Options: roles = {'public': blacklist('personal_thoughts')} .. _exporting_terminology: Terminology =========== To `serialize` data is to convert from the way it's represented in Schematics to some other form. That might be a reduction of the ``Model`` into a ``dict``, but it might also be more complicated. A field can be serialized if it is an instance of ``BaseType`` or if a function is wrapped with the ``@serializable`` decorator. A ``Model`` instance may be serialized with a particular `context`. A context is a ``dict`` passed through the model to each of its fields. A field may use values from the context to alter how it is serialized. .. _exporting_converting_data: Converting Data =============== To export data is basically to convert from one form to another. Schematics can convert data into simple Python types or a language agnostic format. We refer to the native serialization as `to_native`, but we refer to the language agnostic format as `primitive`, since it has removed all dependencies on Python. .. _exporting_native_types: Native Types ------------ The fields in a model attempt to use the best Python representation of data whenever possible. 
For example, the DateTimeType will use Python's ``datetime.datetime`` module. You can reduce a model into the native Python types by calling ``to_native``. >>> trainspotting = Movie() >>> trainspotting.name = u'Trainspotting' >>> trainspotting.director = u'Danny Boyle' >>> trainspotting.release_date = datetime.datetime(1996, 7, 19, 0, 0) >>> trainspotting.personal_thoughts = 'This movie was great!' >>> trainspotting.to_native() { 'name': u'Trainspotting', 'director': u'Danny Boyle', 'release_date': datetime.datetime(1996, 7, 19, 0, 0), 'personal_thoughts': 'This movie was great!' } .. _exporting_primitive_types: Primitive Types --------------- To present data to clients we have the ``Model.to_primitive`` method. Default behavior is to output the same data you would need to reproduce the model in its current state. :: >>> trainspotting.to_primitive() { 'name': u'Trainspotting', 'director': u'Danny Boyle', 'release_date': '1996-07-19T00:00:00.000000', 'personal_thoughts': 'This movie was great!' } Great. We got the primitive data back. It would be easy to convert to JSON from here. >>> import json >>> json.dumps(trainspotting.to_primitive()) '{ "name": "Trainspotting", "director": "Danny Boyle", "release_date": "1996-07-19T00:00:00.000000", "personal_thoughts": "This movie was great!" }' .. _exporting_using_contexts: Using Contexts -------------- Sometimes a field needs information about its environment to know how to serialize itself. For example, the ``MultilingualStringType`` holds several translations of a phrase: >>> class TestModel(Model): ... mls = MultilingualStringType() ... >>> mls_test = TestModel({'mls': { ... 'en_US': 'Hello, world!', ... 'fr_FR': 'Bonjour tout le monde!', ... 'es_MX': '¡Hola, mundo!', ... }}) In this case, serializing without knowing which localized string to use wouldn't make sense: >>> mls_test.to_primitive() [...] 
schematics.exceptions.ConversionError: [u'No default or explicit locales were given.'] Neither does choosing the locale ahead of time, because the same MultilingualStringType field might be serialized several times with different locales inside the same method. However, it could use information in a `context` to return a useful representation: >>> mls_test.to_primitive(context={'locale': 'en_US'}) {'mls': 'Hello, world!'} This allows us to use the same model instance several times with different contexts: >>> for user, locale in [('Joe', 'en_US'), ('Sue', 'es_MX')]: ... print('%s says %s' % (user, mls_test.to_primitive(context={'locale': locale})['mls'])) ... Joe says Hello, world! Sue says ¡Hola, mundo! .. _exporting_compound_types: Compound Types ============== Let's complicate things and observe what happens with data exporting. First, we'll define a collection which will have a list of ``Movie`` instances. First, let's instantiate another movie. :: >>> total_recall = Movie() >>> total_recall.name = u'Total Recall' >>> total_recall.director = u'Paul Verhoeven' >>> total_recall.release_date = datetime.datetime(1990, 6, 1, 0, 0) >>> total_recall.personal_thoughts = 'Old classic. Still love it.' Now, let's define a collection, which has a list of movies in it. :: from schematics.types.compound import ListType, ModelType class Collection(Model): name = StringType() movies = ListType(ModelType(Movie)) notes = StringType() class Options: roles = {'public': blacklist('notes')} Let's instantiate a collection. >>> favorites = Collection() >>> favorites.name = 'My favorites' >>> favorites.notes = 'These are some of my favorite movies' >>> favorites.movies = [trainspotting, total_recall] Here is what happens when we call ``to_primitive()`` on it. 
>>> favorites.to_primitive() { 'notes': 'These are some of my favorite movies', 'name': 'My favorites', 'movies': [{ 'name': u'Trainspotting', 'director': u'Danny Boyle', 'personal_thoughts': 'This movie was great!', 'release_date': '1996-07-19T00:00:00.000000' }, { 'name': u'Total Recall', 'director': u'Paul Verhoeven', 'personal_thoughts': 'Old classic. Still love it.', 'release_date': '1990-06-01T00:00:00.000000' }] } .. _exporting_customizing_output: Customizing Output ================== Schematics offers many ways to customize the behavior of serialization: .. _exporting_roles: Roles ----- Roles offer a way to specify whether or not a field should be skipped during export. There are many reasons this might be desirable, such as access permissions or to not serialize more data than absolutely necessary. Roles are implemented as either white lists or black lists where the members of the list are field names. :: >>> r = blacklist('private_field', 'another_private_field') Imagine we are sending our movie instance to a random person on the Internet. We probably don't want to share our personal thoughts. Recall earlier that we added a role called ``public`` and gave it a blacklist with ``personal_thoughts`` listed. :: class Movie(Model): personal_thoughts = StringType() ... class Options: roles = {'public': blacklist('personal_thoughts')} This is what it looks like to use the role, which should simply remove ``personal_thoughts`` from the export. :: >>> movie.to_primitive(role='public') { 'name': u'Trainspotting', 'director': u'Danny Boyle', 'release_date': '1996-07-19T00:00:00.000000' } This works for compound types too, such as the list of movies in our ``Collection`` model above. :: class Collection(Model): notes = StringType() ... class Options: roles = {'public': blacklist('notes')} We expect the ``personal_thoughts`` field to be removed from the movie data and we also expect the ``notes`` field to be removed from the collection data. 
>>> favorites.to_primitive(role='public') { 'name': 'My favorites', 'movies': [{ 'name': u'Trainspotting', 'director': u'Danny Boyle', 'release_date': '1996-07-19T00:00:00.000000' }, { 'name': u'Total Recall', 'director': u'Paul Verhoeven', 'release_date': '1990-06-01T00:00:00.000000' }] } If no role is specified, the default behavior is to export all fields. This behavior can be overridden by specifying a ``default`` role. Renaming the ``public`` role to ``default`` in the example above yields equivalent results without having to specify ``role`` in the export function. >>> favorites.to_primitive() { 'name': 'My favorites', 'movies': [{ 'name': u'Trainspotting', 'director': u'Danny Boyle', 'release_date': '1996-07-19T00:00:00.000000' }, { 'name': u'Total Recall', 'director': u'Paul Verhoeven', 'release_date': '1990-06-01T00:00:00.000000' }] } .. _exporting_serializable: Serializable ------------ Earlier we mentioned a ``@serializable`` decorator. You can write a function that will produce a value used during serialization with a field name matching the function name. That looks like this: :: ... from schematics.types.serializable import serializable class Song(Model): name = StringType() artist = StringType() url = URLType() @serializable def id(self): return u'%s/%s' % (self.artist, self.name) This is what it looks like to use it. :: >>> song = Song() >>> song.artist = 'Fiona Apple' >>> song.name = 'Werewolf' >>> song.url = 'http://www.youtube.com/watch?v=67KGSJVkix0' >>> song.id 'Fiona Apple/Werewolf' Or here: :: >>> song.to_native() { 'id': u'Fiona Apple/Werewolf', 'artist': u'Fiona Apple' 'name': u'Werewolf', 'url': u'http://www.youtube.com/watch?v=67KGSJVkix0', } .. _exporting_serialized_name: Serialized Name --------------- There are times when you have one name for a field in one place and another name for it somewhere else. Schematics tries to help you by letting you customize the field names used during serialization. 
That looks like this: :: class Person(Model): name = StringType(serialized_name='person_name') Notice the effect it has on serialization. :: >>> p = Person() >>> p.name = 'Ben Weinman' >>> p.to_native() {'person_name': u'Ben Weinman'} .. _exporting_serialize_when_none: Serialize When None ------------------- If a value is not required and doesn't have a value, it will serialize with a None value by default. This can be disabled. :: >>> song = Song() >>> song.to_native() {'url': None, 'name': None, 'artist': None} You can disable at the field level like this: :: class Song(Model): name = StringType(serialize_when_none=False) artist = StringType() And this produces the following: :: >>> s = Song() >>> s.to_native() {'artist': None} Or you can disable it at the class level: :: class Song(Model): name = StringType() artist = StringType() class Options: serialize_when_none=False Using it: :: >>> s = Song() >>> s.to_native() >>> More Information ================ To learn more about **Exporting**, visit the :ref:`Transforms API <api_doc_transforms>`
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/docs/usage/exporting.rst
0.86511
0.748582
exporting.rst
pypi
import functools
import inspect
from typing import List

from .datastructures import Context
from .exceptions import DataError, FieldError
from .iteration import atoms
from .transforms import import_loop, validation_converter
from .undefined import Undefined

__all__: List[str] = []


def schema_from(obj):
    # Accept either a Model class/instance (which carries a ``_schema``
    # attribute) or a bare Schema object and return the Schema.
    try:
        return obj._schema
    except AttributeError:
        return obj


def validate(
    schema,
    mutable,
    raw_data=None,
    trusted_data=None,
    partial=False,
    strict=False,
    convert=True,
    context=None,
    **kwargs
):
    """
    Validate some untrusted data using a model. Trusted data can be passed in
    the `trusted_data` parameter.

    :param schema: The Schema to use as source for validation.
    :param mutable: A mapping or instance that can be changed during validation
        by Schema functions.
    :param raw_data: A mapping or instance containing new data to be validated.
    :param partial: Allow partial data to validate; useful for PATCH requests.
        Essentially drops the ``required=True`` arguments from field
        definitions. Default: False
    :param strict: Complain about unrecognized keys. Default: False
    :param trusted_data: A ``dict``-like structure that may contain already
        validated data.
    :param convert: Controls whether to perform import conversion before
        validating. Can be turned off to skip an unnecessary conversion step
        if all values are known to have the right datatypes (e.g., when
        validating immediately after the initial import). Default: True
    :returns: data ``dict`` containing the valid raw_data plus ``trusted_data``.
        If errors are found, they are raised as a ValidationError with a list
        of errors attached.
    """
    if raw_data is None:
        raw_data = mutable
    context = context or get_validation_context(
        partial=partial, strict=strict, convert=convert
    )
    errors = {}
    try:
        data = import_loop(
            schema,
            mutable,
            raw_data,
            trusted_data=trusted_data,
            context=context,
            **kwargs
        )
    except DataError as exc:
        # Conversion failed for some fields; keep what did convert so the
        # model-level validators can still run over the partial result.
        errors = dict(exc.errors)
        data = exc.partial_data
    errors.update(_validate_model(schema, mutable, data, context))
    if errors:
        raise DataError(errors, data)
    return data


def _validate_model(schema, mutable, data, context):
    """
    Validate data using model level methods.

    :param schema: The Schema to validate ``data`` against.
    :param mutable: A mapping or instance that will be passed to the validator
        containing the original data and that can be mutated.
    :param data: A dict with data to validate. Invalid items are removed from
        it.
    :returns: Errors of the fields that did not pass validation.
    """
    errors = {}
    invalid_fields = []

    def has_validator(atom):
        # Only defined values that have a registered validator are checked.
        return (
            atom.value is not Undefined and atom.name in schema_from(schema).validators
        )

    for field_name, field, value in atoms(schema, data, filter=has_validator):
        try:
            schema_from(schema).validators[field_name](mutable, data, value, context)
        except (FieldError, DataError) as exc:
            # Errors are keyed by the serialized name when one is declared.
            serialized_field_name = field.serialized_name or field_name
            errors[serialized_field_name] = exc.errors
            invalid_fields.append(field_name)
    # Strip everything that failed so callers only receive valid data.
    for field_name in invalid_fields:
        data.pop(field_name)
    return errors


def get_validation_context(**options):
    # Build the default validation ``Context``; ``options`` override the
    # defaults below.
    validation_options = {
        "field_converter": validation_converter,
        "partial": False,
        "strict": False,
        "convert": True,
        "validate": True,
        "new": False,
    }
    validation_options.update(options)
    return Context(**validation_options)


def prepare_validator(func, argcount):
    # Normalize a validator callable so it can be invoked with ``argcount``
    # positional arguments. Validators declared with fewer parameters are
    # wrapped so the trailing argument (presumably the context — see the
    # call site in ``_validate_model``) is discarded before the call.
    if isinstance(func, classmethod):
        # Unwrap classmethods so the underlying function can be inspected.
        func = func.__get__(object).__func__
    func_args = inspect.getfullargspec(func).args
    if len(func_args) < argcount:

        @functools.wraps(func)
        def newfunc(*args, **kwargs):
            sentinel = object()
            # Drop the trailing positional argument unless ``context`` was
            # supplied as a keyword; if it was, it is popped from kwargs
            # instead, so ``func`` never receives it either way.
            if not kwargs or kwargs.pop("context", sentinel) is sentinel:
                args = args[:-1]
            return func(*args, **kwargs)

        return newfunc
    return func
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/validate.py
0.862685
0.534795
validate.py
pypi
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Iterable,
    Mapping,
    NamedTuple,
    Optional,
    Tuple,
)

from .undefined import Undefined

if TYPE_CHECKING:
    from schematics.schema import Schema


class Atom(NamedTuple):
    """A (name, field, value) triple describing one schema field."""

    name: Optional[str] = None
    field: Optional[str] = None
    value: Any = None


def schema_from(obj):
    """Unwrap ``obj._schema`` when present, otherwise return ``obj``."""
    try:
        return obj._schema
    except AttributeError:
        return obj


def atoms(
    schema: "Schema",
    mapping: Mapping,
    keys: Tuple[str, ...] = tuple(Atom._fields),
    filter: Callable[[Atom], bool] = None,
) -> Iterable[Atom]:
    """
    Iterate over the atomic components of a schema definition together with
    the relevant data, yielding ``Atom`` triples of the field's name, its
    type instance and its current value.

    :type schema: schematics.schema.Schema
    :param schema: The Schema definition.

    :type mapping: Mapping
    :param mapping: Structure mapping field names from ``schema`` to values;
        anything implementing the ``Mapping`` interface works.

    :type keys: Tuple[str, str, str]
    :param keys: Which Atom slots to populate. Valid entries are ``name``,
        ``field`` and ``value``; slots not requested stay ``None``. Invalid
        entries raise an exception.

    :type filter: Optional[Callable[[Atom], bool]]
    :param filter: Predicate that drops atoms for which it returns a falsy
        result.

    :rtype: Iterable[Atom]
    """
    if not set(keys) <= set(Atom._fields):
        raise TypeError("invalid key specified")

    want_name = "name" in keys
    want_field = "field" in keys
    # Values are only looked up when requested and a mapping was given.
    want_value = (mapping is not None) and ("value" in keys)

    for name, descriptor in schema_from(schema).fields.items():
        current = Undefined
        if want_value:
            try:
                current = mapping[name]
            except Exception:
                # Missing or unreadable entries surface as Undefined.
                current = Undefined
        atom = Atom(
            name=name if want_name else None,
            field=descriptor if want_field else None,
            value=current,
        )
        if filter is None or filter(atom):
            yield atom


class atom_filter:
    """Namespace grouping the default filter predicates."""

    @staticmethod
    def has_setter(atom):
        return getattr(atom.field, "fset", None) is not None

    @staticmethod
    def not_setter(atom):
        return not atom_filter.has_setter(atom)
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/iteration.py
0.905873
0.423995
iteration.py
pypi
from collections.abc import Set


class Role(Set):
    """
    A ``Role`` bundles a filter function together with the set of field
    names the function has an opinion on.

    The filter function receives a field name, the field's value and the
    stored name collection, and answers ``True`` when that field should be
    *skipped* during an export, ``False`` when it should be kept.

    Because ``Role`` implements the ``Set`` protocol over its field names,
    roles can be combined with set operations; when two roles are merged,
    only the filtering behavior of the first role survives.
    """

    def __init__(self, function, fields):
        # The predicate deciding whether a field is skipped, plus the set
        # of names it applies to.
        self.function = function
        self.fields = set(fields)

    def _from_iterable(self, iterable):
        # Required by the ``Set`` ABC so derived sets stay ``Role``s and
        # keep the current filter function.
        return Role(self.function, iterable)

    def __contains__(self, value):
        return value in self.fields

    def __iter__(self):
        return iter(self.fields)

    def __len__(self):
        return len(self.fields)

    def __eq__(self, other):
        # Two roles match when they filter the same way (compared by the
        # function's name) over the same field names.
        same_filter = self.function.__name__ == other.function.__name__
        return same_filter and self.fields == other.fields

    def __str__(self):
        quoted = ", ".join("'%s'" % field for field in self.fields)
        return "%s(%s)" % (self.function.__name__, quoted)

    def __repr__(self):
        return "<Role %s>" % self

    # -- field-set arithmetic ------------------------------------------

    def __add__(self, other):
        return self._from_iterable(self.fields | set(other))

    def __sub__(self, other):
        return self._from_iterable(self.fields - set(other))

    # -- applying the role to a field ----------------------------------

    def __call__(self, name, value):
        """Ask the role whether the field ``name`` should be skipped."""
        return self.function(name, value, self.fields)

    # -- stock filter functions ----------------------------------------

    @staticmethod
    def wholelist(name, value, seq):
        """
        Accept every field: never requests that a field be skipped, so it
        returns False for all input.

        :param name: The field name to inspect.
        :param value: The field's value.
        :param seq: The list of fields associated with the ``Role``.
        """
        return False

    @staticmethod
    def whitelist(name, value, seq):
        """
        Skip any field whose name is *not* in ``seq``; with an empty or
        missing ``seq``, everything is skipped.

        :param name: The field name to inspect.
        :param value: The field's value.
        :param seq: The list of fields associated with the ``Role``.
        """
        if seq is not None and len(seq) > 0:
            return name not in seq
        return True

    @staticmethod
    def blacklist(name, value, seq):
        """
        Skip any field whose name *is* in ``seq``; with an empty or
        missing ``seq``, nothing is skipped.

        :param name: The field name to inspect.
        :param value: The field's value.
        :param seq: The list of fields associated with the ``Role``.
        """
        if seq is not None and len(seq) > 0:
            return name in seq
        return False
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/role.py
0.929696
0.73456
role.py
pypi
from collections.abc import Mapping, Sequence from typing import List __all__: List[str] = [] class DataObject: """ An object for holding data as attributes. ``DataObject`` can be instantiated like ``dict``:: >>> d = DataObject({'one': 1, 'two': 2}, three=3) >>> d.__dict__ {'one': 1, 'two': 2, 'three': 3} Attributes are accessible via the regular dot notation (``d.x``) as well as the subscription syntax (``d['x']``):: >>> d.one == d['one'] == 1 True To convert a ``DataObject`` into a dictionary, use ``d._to_dict()``. ``DataObject`` implements the following collection-like operations: * iteration through attributes as name-value pairs * ``'x' in d`` for membership tests * ``len(d)`` to get the number of attributes Additionally, the following methods are equivalent to their ``dict` counterparts: ``_clear``, ``_get``, ``_keys``, ``_items``, ``_pop``, ``_setdefault``, ``_update``. An advantage of ``DataObject`` over ``dict` subclasses is that every method name in ``DataObject`` begins with an underscore, so attributes like ``"update"`` or ``"values"`` are valid. 
""" def __init__(self, *args, **kwargs): source = args[0] if args else {} self._update(source, **kwargs) def __repr__(self): return f"{self.__class__.__name__}({self.__dict__!r})" def _copy(self): return self.__class__(self) __copy__ = _copy def __eq__(self, other): return isinstance(other, DataObject) and self.__dict__ == other.__dict__ def __iter__(self): return iter(self.__dict__.items()) def _update(self, source=None, **kwargs): if isinstance(source, DataObject): source = source.__dict__ self.__dict__.update(source, **kwargs) def _setdefaults(self, source): if isinstance(source, dict): source = source.items() for name, value in source: self._setdefault(name, value) return self def _to_dict(self): d = dict(self.__dict__) for k, v in d.items(): if isinstance(v, DataObject): d[k] = v._to_dict() return d def __setitem__(self, key, value): self.__dict__[key] = value def __getitem__(self, key): return self.__dict__[key] def __delitem__(self, key): del self.__dict__[key] def __len__(self): return len(self.__dict__) def __contains__(self, key): return key in self.__dict__ def _clear(self): return self.__dict__.clear() def _get(self, *args): return self.__dict__.get(*args) def _items(self): return self.__dict__.items() def _keys(self): return self.__dict__.keys() def _pop(self, *args): return self.__dict__.pop(*args) def _setdefault(self, *args): return self.__dict__.setdefault(*args) class Context(DataObject): _fields = () def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self._fields: unknowns = [name for name in self._keys() if name not in self._fields] if unknowns: raise ValueError(f"Unexpected field names: {unknowns!r}") @classmethod def _new(cls, *args, **kwargs): if len(args) > len(cls._fields): raise TypeError("Too many positional arguments") return cls(zip(cls._fields, args), **kwargs) @classmethod def _make(cls, obj): if obj is None: return cls() elif isinstance(obj, cls): return obj else: return cls(obj) def __setattr__(self, name, 
value): if name in self: raise TypeError(f"Field '{name}' already set") super().__setattr__(name, value) def _branch(self, **kwargs): if not kwargs: return self items = dict( ((k, v) for k, v in kwargs.items() if v is not None and v != self[k]) ) if items: return self.__class__(self, **items) else: return self def _setdefaults(self, source): if not isinstance(source, dict): source = source.__dict__ new_values = source.copy() new_values.update(self.__dict__) self.__dict__.update(new_values) return self def __bool__(self): return True __nonzero__ = __bool__ class FrozenDict(Mapping): def __init__(self, value): self._value = dict(value) def __getitem__(self, key): return self._value[key] def __iter__(self): return iter(self._value) def __len__(self): return len(self._value) def __hash__(self): try: return self._hash except AttributeError: self._hash = 0 for k, v in self._value.items(): self._hash ^= hash(k) self._hash ^= hash(v) return self._hash def __repr__(self): return repr(self._value) def __str__(self): return str(self._value) class FrozenList(Sequence): def __init__(self, value): self._list = list(value) def __getitem__(self, index): return self._list[index] def __len__(self): return len(self._list) def __hash__(self): try: return self._hash except AttributeError: self._hash = 0 for e in self._list: self._hash ^= hash(e) return self._hash def __repr__(self): return repr(self._list) def __str__(self): return str(self._list) def __eq__(self, other): if len(self) != len(other): return False for i in range(len(self)): if self[i] != other[i]: return False return True
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/datastructures.py
0.909872
0.480662
datastructures.py
pypi
import inspect from collections import ChainMap, OrderedDict from copy import deepcopy from types import FunctionType, MappingProxyType from typing import List from . import schema from .datastructures import Context from .exceptions import ( DataError, MockCreationError, UndefinedValueError, UnknownFieldError, ) from .iteration import atoms from .transforms import convert, export_loop, to_native, to_primitive from .types.base import BaseType from .types.serializable import Serializable from .undefined import Undefined from .util import get_ident from .validate import prepare_validator, validate __all__: List[str] = [] class FieldDescriptor: """ ``FieldDescriptor`` instances serve as field accessors on models. """ def __init__(self, name): """ :param name: The field's name """ self.name = name def __get__(self, instance, cls): """ For a model instance, returns the field's current value. For a model class, returns the field's type object. """ if instance is None: return cls._schema.fields[self.name] value = instance._data.get(self.name, Undefined) if value is Undefined: raise UndefinedValueError(instance, self.name) return value def __set__(self, instance, value): """ Sets the field's value. """ field = instance._schema.fields[self.name] value = field.pre_setattr(value) instance._data.converted[self.name] = value def __delete__(self, instance): """ Deletes the field's value. """ del instance._data[self.name] class ModelMeta(type): """ Metaclass for Models. """ def __new__(mcs, name, bases, attrs): """ This metaclass parses the declarative Model into a corresponding Schema, then adding it as the `_schema` attribute to the host class. 
""" # Structures used to accumulate meta info fields = OrderedDict() validator_functions = {} # Model level options_members = {} # Accumulate metas info from parent classes for base in reversed(bases): if hasattr(base, "_schema"): fields.update(deepcopy(base._schema.fields)) options_members.update(dict(base._schema.options)) validator_functions.update(base._schema.validators) # Parse this class's attributes into schema structures for key, value in attrs.items(): if key.startswith("validate_") and isinstance( value, (FunctionType, classmethod) ): validator_functions[key[9:]] = prepare_validator(value, 4) if isinstance(value, BaseType): fields[key] = value elif isinstance(value, Serializable): fields[key] = value # Convert declared fields into descriptors for new class fields = OrderedDict( sorted( (kv for kv in fields.items()), key=lambda i: i[1]._position_hint, ) ) for key, field in fields.items(): if isinstance(field, BaseType): attrs[key] = FieldDescriptor(key) elif isinstance(field, Serializable): attrs[key] = field klass = type.__new__(mcs, name, bases, attrs) # Parse schema options options = mcs._read_options(name, bases, attrs, options_members) # Parse meta data into new schema klass._schema = schema.Schema( name, model=klass, options=options, validators=validator_functions, *(schema.Field(k, t) for k, t in fields.items()), ) return klass @classmethod def _read_options(mcs, name, bases, attrs, options_members): """ Parses model `Options` class into a `SchemaOptions` instance. 
""" options_class = attrs.get("__optionsclass__", schema.SchemaOptions) if "Options" in attrs: for key, value in inspect.getmembers(attrs["Options"]): if key.startswith("__"): continue if key.startswith("_"): extras = options_members.get("extras", {}).copy() extras.update({key: value}) options_members["extras"] = extras elif key == "roles": roles = options_members.get("roles", {}).copy() roles.update(value) options_members[key] = roles else: options_members[key] = value return options_class(**options_members) class ModelDict(ChainMap): __slots__ = ["_unsafe", "_converted", "__valid", "_valid"] def __init__(self, unsafe=None, converted=None, valid=None): self._unsafe = unsafe if unsafe is not None else {} self._converted = converted if converted is not None else {} self.__valid = valid if valid is not None else {} self._valid = MappingProxyType(self.__valid) super().__init__(self._unsafe, self._converted, self._valid) @property def unsafe(self): return self._unsafe @unsafe.setter def unsafe(self, value): self._unsafe = value self.maps[0] = self._unsafe @property def converted(self): return self._converted @converted.setter def converted(self, value): self._converted = value self.maps[1] = self._converted @property def valid(self): return self._valid @valid.setter def valid(self, value): self._valid = MappingProxyType(value) self.maps[2] = self._valid def __delitem__(self, key): did_delete = False for data in [self.__valid, self._converted, self._unsafe]: try: del data[key] did_delete = True except KeyError: pass if not did_delete: raise KeyError(key) def __repr__(self): return repr(dict(self)) class Model(metaclass=ModelMeta): """ Enclosure for fields and validation. Same pattern deployed by Django models, SQLAlchemy declarative extension and other developer friendly libraries. :param Mapping raw_data: The data to be imported into the model instance. :param Mapping deserialize_mapping: Can be used to provide alternative input names for fields. 
Values may be strings or lists of strings, keyed by the actual field name. :param bool partial: Allow partial data to validate. Essentially drops the ``required=True`` settings from field definitions. Default: True :param bool strict: Complain about unrecognized keys. Default: True """ def __init__( self, raw_data=None, trusted_data=None, deserialize_mapping=None, init=True, partial=True, strict=True, validate=False, app_data=None, lazy=False, **kwargs, ): kwargs.setdefault("init_values", init) kwargs.setdefault("apply_defaults", init) if lazy: self._data = ModelDict(unsafe=raw_data, valid=trusted_data) return self._data = ModelDict(valid=trusted_data) data = self._convert( raw_data, trusted_data=trusted_data, mapping=deserialize_mapping, partial=partial, strict=strict, validate=validate, new=True, app_data=app_data, **kwargs, ) self._data.converted = data if validate: self.validate(partial=partial, app_data=app_data, **kwargs) def validate(self, partial=False, convert=True, app_data=None, **kwargs): """ Validates the state of the model. If the data is invalid, raises a ``DataError`` with error messages. :param bool partial: Allow partial data to validate. Essentially drops the ``required=True`` settings from field definitions. Default: False :param convert: Controls whether to perform import conversion before validating. Can be turned off to skip an unnecessary conversion step if all values are known to have the right datatypes (e.g., when validating immediately after the initial import). 
Default: True """ if not self._data.converted and partial: return # no new input data to validate try: data = self._convert( validate=True, partial=partial, convert=convert, app_data=app_data, **kwargs, ) self._data.valid = data except DataError as e: valid = dict(self._data.valid) valid.update(e.partial_data) self._data.valid = valid raise finally: self._data.converted = {} def import_data(self, raw_data, recursive=False, **kwargs): """ Converts and imports the raw data into an existing model instance. :param raw_data: The data to be imported. """ data = self._convert( raw_data, trusted_data=dict(self), recursive=recursive, **kwargs ) self._data.converted.update(data) if kwargs.get("validate"): self.validate(convert=False) return self def _convert(self, raw_data=None, context=None, **kwargs): """ Converts the instance raw data into richer Python constructs according to the fields on the model, validating data if requested. :param raw_data: New data to be imported and converted """ raw_data = ( {key: raw_data[key] for key in raw_data} if raw_data else self._data.converted ) kwargs["trusted_data"] = kwargs.get("trusted_data") or {} kwargs["convert"] = getattr(context, "convert", kwargs.get("convert", True)) if self._data.unsafe: self._data.unsafe.update(raw_data) raw_data = self._data.unsafe self._data.unsafe = {} kwargs["convert"] = True should_validate = getattr(context, "validate", kwargs.get("validate", False)) func = validate if should_validate else convert return func( self._schema, self, raw_data=raw_data, oo=True, context=context, **kwargs ) def export(self, field_converter=None, role=None, app_data=None, **kwargs): return export_loop( self._schema, self, field_converter=field_converter, role=role, app_data=app_data, **kwargs, ) def to_native(self, role=None, app_data=None, **kwargs): return to_native(self._schema, self, role=role, app_data=app_data, **kwargs) def to_primitive(self, role=None, app_data=None, **kwargs): return to_primitive(self._schema, self, 
role=role, app_data=app_data, **kwargs) def serialize(self, *args, **kwargs): raw_data = self._data.converted try: self.validate(apply_defaults=True) except DataError: pass data = self.to_primitive(*args, **kwargs) self._data.converted = raw_data return data def atoms(self): """ Iterator for the atomic components of a model definition and relevant data that creates a 3-tuple of the field's name, its type instance and its value. """ return atoms(self._schema, self) def __iter__(self): return ( k for k in self._schema.fields if k in self._data and getattr(self._schema.fields[k], "fset", None) is None ) def keys(self): return list(iter(self)) def items(self): return [(k, self._data[k]) for k in self] def values(self): return [self._data[k] for k in self] def get(self, key, default=None): return getattr(self, key, default) @classmethod def _append_field(cls, field_name, field_type): """ Add a new field to this class. :type field_name: str :param field_name: The name of the field to add. :type field_type: BaseType :param field_type: The type to use for the field. """ cls._schema.append_field(schema.Field(field_name, field_type)) setattr(cls, field_name, FieldDescriptor(field_name)) @classmethod def get_mock_object(cls, context=None, overrides=None): """Get a mock object. 
:param dict context: :param dict overrides: overrides for the model """ context = Context._make(context) context._setdefault("memo", set()) context.memo.add(cls) values = {} overrides = overrides or {} for name, field in cls._schema.fields.items(): if name in overrides: continue if getattr(field, "model_class", None) in context.memo: continue try: values[name] = field.mock(context) except MockCreationError as exc: raise MockCreationError(f"{name}: {exc.args[0]}") from exc values.update(overrides) return cls(values) def __getitem__(self, name): if name in self._schema.fields: return getattr(self, name) raise UnknownFieldError(self, name) def __setitem__(self, name, value): if name in self._schema.fields: return setattr(self, name, value) raise UnknownFieldError(self, name) def __delitem__(self, name): if name in self._schema.fields: return delattr(self, name) raise UnknownFieldError(self, name) def __contains__(self, name): serializables = { k for k, t in self._schema.fields.items() if isinstance(t, Serializable) } return ( name in self._data and getattr(self, name, Undefined) is not Undefined ) or name in serializables def __len__(self): return len(self._data) def __eq__(self, other, memo=set()): if self is other: return True if type(self) is not type(other): return NotImplemented key = (id(self), id(other), get_ident()) if key in memo: return True memo.add(key) try: for k in self: if self.get(k) != other.get(k): return False return True finally: memo.remove(key) def __ne__(self, other): return not self == other def __repr__(self): model = self.__class__.__name__ info = self._repr_info() if info: return f"<{model}: {info}>" return f"<{model} instance>" def _repr_info(self): """ Subclasses may implement this method to augment the ``__repr__()`` output for the instance:: class Person(Model): ... def _repr_info(self): return self.name >>> Person({'name': 'Mr. Pink'}) <Person: Mr. Pink> """ return None
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/models.py
0.861553
0.200773
models.py
pypi
"""Exception types for schematics.

Errors are stored frozen (immutable) because the ``logging`` module expects
exceptions to be hashable; see ``BaseError.__init__``.
"""
import json
from collections.abc import Mapping, Sequence
from typing import Optional, Type

from .datastructures import FrozenDict, FrozenList
from .translator import LazyText

__all__ = [
    "BaseError",
    "ErrorMessage",
    "FieldError",
    "ConversionError",
    "ValidationError",
    "StopValidationError",
    "CompoundError",
    "DataError",
    "MockCreationError",
    "UndefinedValueError",
    "UnknownFieldError",
]


class BaseError(Exception):
    def __init__(self, errors):
        """
        The base class for all Schematics errors.

        message should be a human-readable message,
        while errors is a machine-readable list, or dictionary.

        if None is passed as the message, and error is populated,
        the primitive representation will be serialized.

        the Python logging module expects exceptions to be hashable
        and therefore immutable. As a result, it is not possible
        to mutate BaseError's error list or dict after initialization.
        """
        # Freeze before handing to Exception.__init__ so self.args[0] is
        # the immutable form from the start.
        errors = self._freeze(errors)
        super().__init__(errors)

    @property
    def errors(self):
        # The frozen error structure is the sole positional Exception arg.
        return self.args[0]

    def to_primitive(self):
        """
        converts the errors dict to a primitive representation of dicts,
        list and strings.
        """
        # Computed lazily and cached; the cache attribute is separate from
        # the frozen errors, so immutability of `errors` is preserved.
        try:
            return self._primitive
        except AttributeError:
            self._primitive = self._to_primitive(self.errors)
            return self._primitive

    @staticmethod
    def _freeze(obj):
        """freeze common data structures to something immutable."""
        if isinstance(obj, dict):
            return FrozenDict(obj)
        if isinstance(obj, list):
            return FrozenList(obj)
        # Anything else (str, ErrorMessage, ...) is assumed immutable already.
        return obj

    @classmethod
    def _to_primitive(cls, obj):
        """recursive to_primitive for basic data types."""
        if isinstance(obj, str):
            return obj
        if isinstance(obj, Sequence):
            return [cls._to_primitive(e) for e in obj]
        if isinstance(obj, Mapping):
            return dict((k, cls._to_primitive(v)) for k, v in obj.items())
        # Fallback: stringify (covers ErrorMessage via its __str__).
        return str(obj)

    def __str__(self):
        return json.dumps(self.to_primitive())

    def __repr__(self):
        return f"{self.__class__.__name__}({self.errors!r})"

    def __hash__(self):
        # Works because errors were frozen to hashable containers in __init__.
        return hash(self.errors)

    def __eq__(self, other):
        # Same concrete type: compare error payloads; otherwise compare the
        # payload against `other` directly (so err == [...] can be True).
        if type(self) is type(other):
            return self.errors == other.errors
        return self.errors == other

    def __ne__(self, other):
        return not (self == other)


class ErrorMessage:
    """A single error: a human-readable summary plus optional extra info."""

    def __init__(self, summary, info=None):
        # `type` is filled in later by FieldError.__init__ with the
        # exception class that carried this message.
        self.type = None
        self.summary = summary
        self.info = info

    def __repr__(self):
        return f"{self.__class__.__name__}({self.summary!r}, {self.info!r})"

    def __str__(self):
        if self.info:
            return f"{self.summary}: {self._info_as_str()}"
        return str(self.summary)

    def _info_as_str(self):
        # Quote strings, keep ints bare, stringify everything else.
        if isinstance(self.info, int):
            return str(self.info)
        if isinstance(self.info, str):
            return f'"{self.info}"'
        return str(self.info)

    def __eq__(self, other):
        if isinstance(other, ErrorMessage):
            return (
                self.summary == other.summary
                and self.type == other.type
                and self.info == other.info
            )
        # Allow comparison against a plain string (summary only).
        if isinstance(other, str):
            return self.summary == other
        return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.summary, self.type, self.info))


class FieldError(BaseError, Sequence):
    """Abstract base for per-field errors; behaves as a sequence of
    ErrorMessage objects. Raise ConversionError or ValidationError instead."""

    # Subclasses may override (see StopValidationError) to tag messages with
    # a different exception type than their own class.
    type: Optional[Type[Exception]] = None

    def __init__(self, *args, **kwargs):
        if type(self) is FieldError:
            raise NotImplementedError(
                "Please raise either ConversionError or ValidationError."
            )
        if len(args) == 0:
            raise TypeError("Please provide at least one error or error message.")
        # Input normalization: kwargs build one message; a single list arg is
        # exploded; multiple args are taken as-is.
        if kwargs:
            items = [ErrorMessage(*args, **kwargs)]
        elif len(args) == 1:
            arg = args[0]
            if isinstance(arg, list):
                items = list(arg)
            else:
                items = [arg]
        else:
            items = args
        errors = []
        for item in items:
            if isinstance(item, (str, LazyText)):
                errors.append(ErrorMessage(str(item)))
            elif isinstance(item, tuple):
                # Tuple unpacks to (summary, info).
                errors.append(ErrorMessage(*item))
            elif isinstance(item, ErrorMessage):
                errors.append(item)
            elif isinstance(item, self.__class__):
                # Merging another error of the same kind flattens its messages.
                errors.extend(item.errors)
            else:
                raise TypeError(
                    f"'{type(item).__name__}()' object is neither a {type(self).__name__} nor an error message."
                )
        # Stamp each message with the carrying exception type.
        for error in errors:
            error.type = self.type or type(self)
        super().__init__(errors)

    def __contains__(self, value):
        return value in self.errors

    def __getitem__(self, index):
        return self.errors[index]

    def __iter__(self):
        return iter(self.errors)

    def __len__(self):
        return len(self.errors)


class ConversionError(FieldError, TypeError):
    """Exception raised when data cannot be converted to the correct python type"""


class ValidationError(FieldError, ValueError):
    """Exception raised when invalid data is encountered."""


class StopValidationError(ValidationError):
    """Exception raised when no more validation need occur."""

    type = ValidationError


class CompoundError(BaseError):
    """Aggregates errors from multiple fields, keyed by field name."""

    def __init__(self, errors):
        if not isinstance(errors, dict):
            raise TypeError("Compound errors must be reported as a dictionary.")
        # Flatten nested CompoundErrors to their raw error payloads.
        for key, value in errors.items():
            if isinstance(value, CompoundError):
                errors[key] = value.errors
            else:
                errors[key] = value
        super().__init__(errors)


class DataError(CompoundError):
    """Model-level error; `partial_data` holds the values that did validate."""

    def __init__(self, errors, partial_data=None):
        super().__init__(errors)
        self.partial_data = partial_data


class MockCreationError(ValueError):
    """Exception raised when a mock value cannot be generated."""


class UndefinedValueError(AttributeError, KeyError):
    """Exception raised when accessing a field with an undefined value."""

    # Inherits both AttributeError and KeyError so it satisfies attribute-
    # style and subscript-style access failures alike.
    def __init__(self, model, name):
        msg = f"'{model.__class__.__name__}' instance has no value for field '{name}'"
        super().__init__(msg)


class UnknownFieldError(KeyError):
    """Exception raised when attempting to access a nonexistent field using the subscription syntax."""

    def __init__(self, model, name):
        msg = f"Model '{model.__class__.__name__}' has no field named '{name}'"
        super().__init__(msg)
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/exceptions.py
0.873997
0.215268
exceptions.py
pypi
import functools

from ..transforms import convert, to_primitive
from ..validate import validate


def _callback_wrap(data, schema, transform, *args, **kwargs):
    # Adapter: the machine supplies (data, schema), transforms expect
    # (schema, data).
    return transform(schema, data, *args, **kwargs)


class Machine:
    """A poor man's state machine."""

    states = ("raw", "converted", "validated", "serialized")
    transitions = (
        {"trigger": "init", "to": "raw"},
        {"trigger": "convert", "from": "raw", "to": "converted"},
        {"trigger": "validate", "from": "converted", "to": "validated"},
        {"trigger": "serialize", "from": "validated", "to": "serialized"},
    )
    callbacks = {
        "convert": functools.partial(_callback_wrap, transform=convert, partial=True),
        "validate": functools.partial(
            _callback_wrap, transform=validate, convert=False, partial=False
        ),
        "serialize": functools.partial(_callback_wrap, transform=to_primitive),
    }

    def __init__(self, data, *args):
        # Entering the machine fires the implicit "init" transition.
        self.state = self._transition(trigger="init")["to"]
        self.data = data
        self.args = args

    def __getattr__(self, name):
        # Any unknown attribute becomes a trigger call: machine.convert()
        # is equivalent to machine.trigger("convert").
        return functools.partial(self.trigger, name)

    def _transition(self, trigger=None, src_state=None, dst_state=None):
        """Return the first transition matching the given criteria, or None."""
        candidates = self._transitions(
            trigger=trigger, src_state=src_state, dst_state=dst_state
        )
        return next(candidates, None)

    def _transitions(self, trigger=None, src_state=None, dst_state=None):
        """Yield every transition matching the non-None criteria."""

        def field_matches(entry, key, wanted):
            # A criterion of None is a wildcard.
            return wanted is None or entry.get(key) == wanted

        for entry in self.transitions:
            if (
                field_matches(entry, "trigger", trigger)
                and field_matches(entry, "from", src_state)
                and field_matches(entry, "to", dst_state)
            ):
                yield entry

    def trigger(self, trigger):
        """Fire `trigger` from the current state, running its callback."""
        transition = self._transition(trigger=trigger, src_state=self.state)
        if transition is None:
            # No legal transition from the current state under this trigger.
            raise AttributeError(trigger)
        callback = self.callbacks.get(trigger)
        if callback is not None:
            self.data = callback(self.data, *self.args)
        self.state = transition["to"]

    def can(self, state):
        """True if `state` is reachable from the current state in one step."""
        return self._transition(src_state=self.state, dst_state=state) is not None

    def cannot(self, state):
        return not self.can(state)
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/contrib/machine.py
0.68437
0.328987
machine.py
pypi
from enum import Enum

from ..exceptions import ConversionError
from ..translator import _
from ..types import BaseType


class EnumType(BaseType):
    """A field type allowing to use native enums as values.
    Restricts values to enum members and (optionally) enum values.
    `use_values` - if set to True allows do assign enumerated values to the field.

    >>> import enum
    >>> class E(enum.Enum):
    ...    A = 1
    ...    B = 2
    >>> from schematics import Model
    >>> class AModel(Model):
    ...    foo = EnumType(E)
    >>> a = AModel()
    >>> a.foo = E.A
    >>> a.foo.value == 1
    """

    MESSAGES = {
        "convert": _("Couldn't interpret '{0}' as member of {1}."),
    }

    def __init__(self, enum, use_values=False, **kwargs):
        """
        :param enum: Enum class to which restrict values assigned to the field.
        :param use_values: If true, also values of the enum (right-hand side)
            can be assigned here.

        Other args are passed to superclass.
        """
        self._enum_class = enum
        self._use_values = use_values
        super().__init__(**kwargs)

    def to_native(self, value, context=None):
        """Convert `value` to a member of the wrapped enum class.

        Accepts an existing member, a member name, or (when `use_values` is
        set) a member value.  Raises ConversionError on anything else.
        """
        if isinstance(value, self._enum_class):
            return value
        # BUGFIX: compare against None instead of truthiness. Enum members
        # can be falsy (e.g. an IntEnum member whose value is 0 inherits
        # int.__bool__), and a falsy member found by name or value must
        # still count as a successful lookup rather than fall through to
        # ConversionError.
        by_name = self._find_by_name(value)
        if by_name is not None:
            return by_name
        by_value = self._find_by_value(value)
        if by_value is not None:
            return by_value
        raise ConversionError(self.messages["convert"].format(value, self._enum_class))

    def _find_by_name(self, value):
        """Return the member named `value`, or None if there is no such name."""
        if isinstance(value, str):
            try:
                return self._enum_class[value]
            except KeyError:
                pass
        return None

    def _find_by_value(self, value):
        """Return the member whose value equals `value` (only when
        `use_values` was requested), or None."""
        if not self._use_values:
            return None
        for member in self._enum_class:
            if member.value == value:
                return member
        return None

    def to_primitive(self, value, context=None):
        """Serialize a member to its value (if `use_values`) or its name."""
        if isinstance(value, Enum):
            if self._use_values:
                return value.value
            return value.name
        return str(value)
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/contrib/enum_type.py
0.838845
0.330201
enum_type.py
pypi
import itertools from collections.abc import Iterable, Mapping, Sequence from typing import Type, TypeVar from ..common import DROP, NONEMPTY, NOT_NONE from ..exceptions import BaseError, CompoundError, ConversionError, ValidationError from ..transforms import ( convert, export_loop, get_export_context, get_import_context, to_native_converter, to_primitive_converter, ) from ..translator import _ from ..util import get_all_subclasses, import_string from .base import BaseType, get_value_in T = TypeVar("T") __all__ = [ "CompoundType", "MultiType", "ModelType", "ListType", "DictType", "PolyModelType", ] class CompoundType(BaseType): def __init__(self, **kwargs): super().__init__(**kwargs) self.is_compound = True try: self.field.parent_field = self except AttributeError: pass def _setup(self, field_name, owner_model): # Recursively set up inner fields. try: field = self.field except AttributeError: pass else: field._setup(None, owner_model) super()._setup(field_name, owner_model) def convert(self, value, context=None): context = context or get_import_context() return self._convert(value, context) def _convert(self, value, context): raise NotImplementedError def export(self, value, format, context=None): context = context or get_export_context() return self._export(value, format, context) def _export(self, value, format, context): raise NotImplementedError def to_native(self, value, context=None): context = context or get_export_context(to_native_converter) return to_native_converter(self, value, context) def to_primitive(self, value, context=None): context = context or get_export_context(to_primitive_converter) return to_primitive_converter(self, value, context) def _init_field(self, field, options): """ Instantiate the inner field that represents each element within this compound type. In case the inner field is itself a compound type, its inner field can be provided as the ``nested_field`` keyword argument. 
""" if not isinstance(field, BaseType): nested_field = options.pop("nested_field", None) or options.pop( "compound_field", None ) if nested_field: field = field(field=nested_field, **options) else: field = field(**options) return field MultiType = CompoundType class ModelType(CompoundType): """A field that can hold an instance of the specified model.""" primitive_type = dict @property def native_type(self): return self.model_class @property def fields(self): return self.model_class.fields @property def model_class(self): if self._model_class: return self._model_class model_class = import_string(self.model_name) self._model_class = model_class return model_class def __init__(self, model_spec: Type[T], **kwargs): from ..models import ModelMeta if isinstance(model_spec, ModelMeta): self._model_class = model_spec self.model_name = self.model_class.__name__ elif isinstance(model_spec, str): self._model_class = None self.model_name = model_spec else: raise TypeError( "ModelType: Expected a model, got an argument " "of the type '{}'.".format(model_spec.__class__.__name__) ) super().__init__(**kwargs) def _repr_info(self): return self.model_class.__name__ def _mock(self, context=None): return self.model_class.get_mock_object(context) def _setup(self, field_name, owner_model): # Resolve possible name-based model reference. if not self._model_class: if self.model_name == owner_model.__name__: self._model_class = owner_model else: pass # Intentionally left blank, it will be setup later. 
super()._setup(field_name, owner_model) def pre_setattr(self, value): from ..models import Model if value is not None and not isinstance(value, Model): if not isinstance(value, dict): raise ConversionError(_("Model conversion requires a model or dict")) value = self.model_class(value) return value def _convert(self, value, context): field_model_class = self.model_class if isinstance(value, field_model_class): model_class = type(value) elif isinstance(value, dict): model_class = field_model_class else: raise ConversionError( _("Input must be a mapping or '%s' instance") % field_model_class.__name__ ) if context.convert and context.oo: return model_class(value, context=context) return convert(model_class._schema, value, context=context) def _export(self, value, format, context): from ..models import Model if isinstance(value, Model): model_class = type(value) else: model_class = self.model_class return export_loop(model_class, value, context=context) class ListType(CompoundType): """A field for storing a list of items, all of which must conform to the type specified by the ``field`` parameter. Use it like this:: ... 
categories = ListType(StringType) """ primitive_type = list native_type = list def __init__(self, field: T, min_size=None, max_size=None, **kwargs): """Create a list of objects of type `field`.""" self.field = self._init_field(field, kwargs) self.min_size = min_size self.max_size = max_size validators = [self.check_length] + kwargs.pop("validators", []) super().__init__(validators=validators, **kwargs) @property def model_class(self): return self.field.model_class def _repr_info(self): return self.field.__class__.__name__ def _mock(self, context=None): random_length = get_value_in(self.min_size, self.max_size) return [self.field._mock(context) for dummy in range(random_length)] def _coerce(self, value): if isinstance(value, list): return value if isinstance(value, (str, Mapping)): # unacceptable iterables pass elif isinstance(value, Sequence): return value elif isinstance(value, Iterable): return value raise ConversionError(_("Could not interpret the value as a list")) def _convert(self, value, context): value = self._coerce(value) data = [] errors = {} for index, item in enumerate(value): try: data.append(context.field_converter(self.field, item, context)) except BaseError as exc: errors[index] = exc if errors: raise CompoundError(errors) return data def check_length(self, value, context): list_length = len(value) if value else 0 if self.min_size is not None and list_length < self.min_size: message = ( { True: _("Please provide at least %d item."), False: _("Please provide at least %d items."), }[self.min_size == 1] ) % self.min_size raise ValidationError(message) if self.max_size is not None and list_length > self.max_size: message = ( { True: _("Please provide no more than %d item."), False: _("Please provide no more than %d items."), }[self.max_size == 1] ) % self.max_size raise ValidationError(message) def _export(self, list_instance, format, context): """Loops over each item in the model and applies either the field transform or the multitype transform. 
Essentially functions the same as `transforms.export_loop`. """ data = [] _export_level = self.field.get_export_level(context) if _export_level == DROP: return data for value in list_instance: shaped = self.field.export(value, format, context) if shaped is None: if _export_level <= NOT_NONE: continue elif self.field.is_compound and len(shaped) == 0: if _export_level <= NONEMPTY: continue data.append(shaped) return data class DictType(CompoundType): """A field for storing a mapping of items, the values of which must conform to the type specified by the ``field`` parameter. Use it like this:: ... categories = DictType(StringType) """ primitive_type = dict native_type = dict def __init__(self, field, coerce_key=None, **kwargs): """Create a dict with str keys and type `field` values.""" self.field = self._init_field(field, kwargs) self.coerce_key = coerce_key or str super().__init__(**kwargs) @property def model_class(self): return self.field.model_class def _repr_info(self): return self.field.__class__.__name__ def _convert(self, value, context, safe=False): if not isinstance(value, Mapping): raise ConversionError(_("Only mappings may be used in a DictType")) data = {} errors = {} for k, v in value.items(): try: data[self.coerce_key(k)] = context.field_converter( self.field, v, context ) except BaseError as exc: errors[k] = exc if errors: raise CompoundError(errors) return data def _export(self, dict_instance, format, context): """Loops over each item in the model and applies either the field transform or the multitype transform. Essentially functions the same as `transforms.export_loop`. 
""" data = {} _export_level = self.field.get_export_level(context) if _export_level == DROP: return data for key, value in dict_instance.items(): shaped = self.field.export(value, format, context) if shaped is None: if _export_level <= NOT_NONE: continue elif self.field.is_compound and len(shaped) == 0: if _export_level <= NONEMPTY: continue data[key] = shaped return data class PolyModelType(CompoundType): """A field that accepts an instance of any of the specified models.""" primitive_type = dict native_type = None # cannot be determined from a PolyModelType instance def __init__(self, model_spec, **kwargs): from ..models import ModelMeta if isinstance(model_spec, (ModelMeta, str)): self.model_classes = (model_spec,) allow_subclasses = True elif isinstance(model_spec, Iterable): self.model_classes = tuple(model_spec) allow_subclasses = False else: raise Exception( "The first argument to PolyModelType.__init__() " "must be a model or an iterable." ) self.claim_function = kwargs.pop("claim_function", None) self.allow_subclasses = kwargs.pop("allow_subclasses", allow_subclasses) CompoundType.__init__(self, **kwargs) def _setup(self, field_name, owner_model): # Resolve possible name-based model references. 
resolved_classes = [] for m in self.model_classes: if isinstance(m, str): if m == owner_model.__name__: resolved_classes.append(owner_model) else: raise Exception( "PolyModelType: Unable to resolve model '{}'.".format(m) ) else: resolved_classes.append(m) self.model_classes = tuple(resolved_classes) super()._setup(field_name, owner_model) def is_allowed_model(self, model_instance): if self.allow_subclasses: if isinstance(model_instance, self.model_classes): return True else: if model_instance.__class__ in self.model_classes: return True return False def _convert(self, value, context): if value is None: return None if not context.validate: if self.is_allowed_model(value): return value if not isinstance(value, dict): if len(self.model_classes) > 1: instanceof_msg = "one of: {}".format( ", ".join(cls.__name__ for cls in self.model_classes) ) else: instanceof_msg = self.model_classes[0].__name__ raise ConversionError( _( "Please use a mapping for this field or " "an instance of {}" ).format(instanceof_msg) ) model_class = self.find_model(value) return model_class(value, context=context) def find_model(self, data): """Finds the intended type by consulting potential classes or `claim_function`.""" if self.claim_function: kls = self.claim_function(self, data) if not kls: raise Exception("Input for polymorphic field did not match any model") return kls fallback = None matching_classes = [] for kls in self._get_candidates(): try: # If a model defines a _claim_polymorphic method, use # it to see if the model matches the data. kls_claim = kls._claim_polymorphic except AttributeError: # The first model that doesn't define the hook can be # used as a default if there's no match. 
if not fallback: fallback = kls else: if kls_claim(data): matching_classes.append(kls) if not matching_classes and fallback: return fallback if len(matching_classes) != 1: raise Exception("Got ambiguous input for polymorphic field") return matching_classes[0] def _export(self, model_instance, format, context): model_class = model_instance.__class__ if not self.is_allowed_model(model_instance): raise Exception( "Cannot export: {} is not an allowed type".format(model_class) ) return model_instance.export(context=context) def _get_candidates(self): candidates = self.model_classes if self.allow_subclasses: candidates = itertools.chain.from_iterable( ([m] + get_all_subclasses(m) for m in candidates) ) return candidates
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/types/compound.py
0.820326
0.17075
compound.py
pypi
import copy import datetime import decimal import itertools import numbers import random import re import string import uuid from collections import OrderedDict from collections.abc import Iterable from typing import Any, Optional, Type from ..common import DEFAULT, NATIVE, NONEMPTY, PRIMITIVE from ..exceptions import ( ConversionError, MockCreationError, StopValidationError, ValidationError, ) from ..translator import _ from ..undefined import Undefined from ..util import listify from ..validate import get_validation_context, prepare_validator __all__ = [ "BaseType", "UUIDType", "StringType", "MultilingualStringType", "NumberType", "IntType", "LongType", "FloatType", "DecimalType", "HashType", "MD5Type", "SHA1Type", "BooleanType", "GeoPointType", "DateType", "DateTimeType", "UTCDateTimeType", "TimestampType", "TimedeltaType", ] def fill_template(template, min_length, max_length): return template % random_string( get_value_in( min_length, max_length, padding=len(template) - 2, required_length=1 ) ) def get_range_endpoints(min_length, max_length, padding=0, required_length=0): if min_length is None: min_length = 0 if max_length is None: max_length = max(min_length * 2, 16) if padding: max_length = max_length - padding min_length = max(min_length - padding, 0) if max_length < required_length: raise MockCreationError("This field is too short to hold the mock data") min_length = max(min_length, required_length) if max_length < min_length: raise MockCreationError("Minimum is greater than maximum") return min_length, max_length def get_value_in(min_length, max_length, padding=0, required_length=0): return random.randint( *get_range_endpoints(min_length, max_length, padding, required_length) ) _alphanumeric = string.ascii_letters + string.digits def random_string(length, chars=_alphanumeric): return "".join(random.choice(chars) for _ in range(length)) _last_position_hint = -1 _next_position_hint = itertools.count() class TypeMeta(type): """ Meta class for BaseType. 
Merges `MESSAGES` dict and accumulates validator methods. """ def __new__(mcs, name, bases, attrs): messages = {} validators = OrderedDict() for base in reversed(bases): try: messages.update(base.MESSAGES) except AttributeError: pass try: validators.update(base._validators) except AttributeError: pass try: messages.update(attrs["MESSAGES"]) except KeyError: pass attrs["MESSAGES"] = messages for attr_name, attr in attrs.items(): if attr_name.startswith("validate_"): validators[attr_name] = 1 attrs[attr_name] = prepare_validator(attr, 3) attrs["_validators"] = validators return type.__new__(mcs, name, bases, attrs) class BaseType(metaclass=TypeMeta): """A base class for Types in a Schematics model. Instances of this class may be added to subclasses of ``Model`` to define a model schema. Validators that need to access variables on the instance can be defined be implementing methods whose names start with ``validate_`` and accept one parameter (in addition to ``self``) :param required: Invalidate field when value is None or is not supplied. Default: False. :param default: When no data is provided default to this value. May be a callable. Default: None. :param serialized_name: The name of this field defaults to the class attribute used in the model. However if the field has another name in foreign data set this argument. Serialized data will use this value for the key name too. :param deserialize_from: A name or list of named fields for which foreign data sets are searched to provide a value for the given field. This only effects inbound data. :param choices: A list of valid choices. This is the last step of the validator chain. :param validators: A list of callables. Each callable receives the value after it has been converted into a rich python type. Default: [] :param serialize_when_none: Dictates if the field should appear in the serialized data even if the value is None. Default: None. :param messages: Override the error messages with a dict. 
You can also do this by subclassing the Type and defining a `MESSAGES` dict attribute on the class. A metaclass will merge all the `MESSAGES` and override the resulting dict with instance level `messages` and assign to `self.messages`. :param metadata: Dictionary for storing custom metadata associated with the field. To encourage compatibility with external tools, we suggest these keys for common metadata: - *label* : Brief human-readable label - *description* : Explanation of the purpose of the field. Used for help, tooltips, documentation, etc. """ primitive_type: Optional[Type] = None native_type: Optional[Type] = None MESSAGES = { "required": _("This field is required."), "choices": _("Value must be one of {0}."), } EXPORT_METHODS = { NATIVE: "to_native", PRIMITIVE: "to_primitive", } def __init__( self, required=False, default=Undefined, serialized_name=None, choices=None, validators=None, deserialize_from=None, export_level=None, serialize_when_none=None, messages=None, metadata=None, ): super().__init__() self.required = required self._default = default self.serialized_name = serialized_name if choices and (isinstance(choices, str) or not isinstance(choices, Iterable)): raise TypeError('"choices" must be a non-string Iterable') self.choices = choices self.deserialize_from = listify(deserialize_from) self.validators = [ getattr(self, validator_name) for validator_name in self._validators ] if validators: self.validators += (prepare_validator(func, 2) for func in validators) self._set_export_level(export_level, serialize_when_none) self.messages = dict(self.MESSAGES, **(messages or {})) self.metadata = metadata or {} self._position_hint = next(_next_position_hint) # For ordering of fields self.name = None self.owner_model = None self.parent_field = None self.typeclass = self.__class__ self.is_compound = False self.export_mapping = dict( (format, getattr(self, fname)) for format, fname in self.EXPORT_METHODS.items() ) def __repr__(self): type_ = 
f"{self.__class__.__name__}({self._repr_info() or ''}) instance" model = f" on {self.owner_model.__name__}" if self.owner_model else "" field = f" as '{self.name}'" if self.name else "" return f"<{type_}{model}{field}>" def _repr_info(self): return None def __call__(self, value, context=None): return self.convert(value, context) def __deepcopy__(self, memo): return copy.copy(self) def _mock(self, context=None): return None def _setup(self, field_name, owner_model): """Perform late-stage setup tasks that are run after the containing model has been created. """ self.name = field_name self.owner_model = owner_model self._input_keys = self._get_input_keys() def _set_export_level(self, export_level, serialize_when_none): if export_level is not None: self.export_level = export_level elif serialize_when_none is True: self.export_level = DEFAULT elif serialize_when_none is False: self.export_level = NONEMPTY else: self.export_level = None def get_export_level(self, context): if self.owner_model: level = self.owner_model._schema.options.export_level else: level = DEFAULT if self.export_level is not None: level = self.export_level if context.export_level is not None: level = context.export_level return level def get_input_keys(self, mapping=None): if mapping: return self._get_input_keys(mapping) return self._input_keys def _get_input_keys(self, mapping=None): input_keys = [self.name] if self.serialized_name: input_keys.append(self.serialized_name) if mapping and self.name in mapping: input_keys.extend(listify(mapping[self.name])) if self.deserialize_from: input_keys.extend(self.deserialize_from) return input_keys @property def default(self): default = self._default if callable(default): default = default() return default def pre_setattr(self, value): return value def convert(self, value, context=None): return self.to_native(value, context) def export(self, value, format, context=None): return self.export_mapping[format](value, context) def to_primitive(self, value, 
context=None): """Convert internal data to a value safe to serialize.""" return value def to_native(self, value, context=None): """ Convert untrusted data to a richer Python construct. """ return value def validate(self, value, context=None): """ Validate the field and return a converted value or raise a ``ValidationError`` with a list of errors raised by the validation chain. Stop the validation process from continuing through the validators by raising ``StopValidationError`` instead of ``ValidationError``. """ context = context or get_validation_context() if context.convert: value = self.convert(value, context) elif self.is_compound: self.convert(value, context) errors = [] for validator in self.validators: try: validator(value, context) except ValidationError as exc: errors.append(exc) if isinstance(exc, StopValidationError): break if errors: raise ValidationError(errors) return value def check_required(self, value, context): if self.required and (value is None or value is Undefined): if self.name is None or context and not context.partial: raise ConversionError(self.messages["required"]) def validate_choices(self, value, context): if self.choices is not None: if value not in self.choices: raise ValidationError( self.messages["choices"].format(str(self.choices)) ) def mock(self, context=None): if not self.required and not random.choice([True, False]): return self.default if self.choices is not None: return random.choice(self.choices) return self._mock(context) class UUIDType(BaseType): """A field that stores a valid UUID value.""" primitive_type = str native_type = uuid.UUID MESSAGES = { "convert": _("Couldn't interpret '{0}' value as UUID."), } def __init__(self, **kwargs): """Create a UUID field.""" super().__init__(**kwargs) def _mock(self, context=None): return uuid.uuid4() def to_native(self, value, context=None): if not isinstance(value, uuid.UUID): try: value = uuid.UUID(value) except (TypeError, ValueError) as exc: raise 
ConversionError(self.messages["convert"].format(value)) from exc return value def to_primitive(self, value, context=None): return str(value) class StringType(BaseType): """A Unicode string field.""" primitive_type = str native_type = str allow_casts = (int, bytes) MESSAGES = { "convert": _("Couldn't interpret '{0}' as string."), "decode": _("Invalid UTF-8 data."), "max_length": _("String value is too long."), "min_length": _("String value is too short."), "regex": _("String value did not match validation regex."), } def __init__(self, regex=None, max_length=None, min_length=None, **kwargs): """Create a typing.Text field.""" self.regex = re.compile(regex) if regex else None self.max_length = max_length self.min_length = min_length super().__init__(**kwargs) def _mock(self, context=None): return random_string(get_value_in(self.min_length, self.max_length)) def to_native(self, value, context=None): if isinstance(value, str): return value if isinstance(value, self.allow_casts): if isinstance(value, bytes): try: return str(value, "utf-8") except UnicodeError as exc: raise ConversionError( self.messages["decode"].format(value) ) from exc elif isinstance(value, bool): pass else: return str(value) raise ConversionError(self.messages["convert"].format(value)) def validate_length(self, value, context=None): length = len(value) if self.max_length is not None and length > self.max_length: raise ValidationError(self.messages["max_length"]) if self.min_length is not None and length < self.min_length: raise ValidationError(self.messages["min_length"]) def validate_regex(self, value, context=None): if self.regex is not None and self.regex.match(value) is None: raise ValidationError(self.messages["regex"]) class NumberType(BaseType): """A generic number field. Converts to and validates against `number_type` parameter. 
""" primitive_type: Optional[Type] = None native_type: Optional[Type] = None number_type: Optional[str] = None MESSAGES = { "number_coerce": _("Value '{0}' is not {1}."), "number_min": _("{0} value should be greater than or equal to {1}."), "number_max": _("{0} value should be less than or equal to {1}."), } def __init__(self, min_value=None, max_value=None, strict=False, **kwargs): """Create an int|float field.""" self.min_value = min_value self.max_value = max_value self.strict = strict super().__init__(**kwargs) def _mock(self, context=None): number = random.uniform(*get_range_endpoints(self.min_value, self.max_value)) return self.native_type(number) if self.native_type else number def to_native(self, value, context=None): if isinstance(value, bool): value = int(value) if isinstance(value, self.native_type): return value try: native_value = self.native_type(value) except (TypeError, ValueError): pass else: if self.native_type is float: # Float conversion is strict enough. return native_value if not self.strict and native_value == value: # Match numeric types. 
return native_value if isinstance(value, (str, numbers.Integral)): return native_value raise ConversionError( self.messages["number_coerce"].format(value, self.number_type.lower()) ) def validate_range(self, value, context=None): if self.min_value is not None and value < self.min_value: raise ValidationError( self.messages["number_min"].format(self.number_type, self.min_value) ) if self.max_value is not None and value > self.max_value: raise ValidationError( self.messages["number_max"].format(self.number_type, self.max_value) ) return value class IntType(NumberType): """A field that validates input as an Integer""" primitive_type = int native_type = int number_type = "Int" def __init__(self, **kwargs): """Create an int field.""" super().__init__(**kwargs) LongType = IntType class FloatType(NumberType): """A field that validates input as a Float""" primitive_type = float native_type = float number_type = "Float" def __init__(self, **kwargs): """Create a float field.""" super().__init__(**kwargs) class DecimalType(NumberType): """A fixed-point decimal number field.""" primitive_type = str native_type = decimal.Decimal number_type = "Decimal" def to_primitive(self, value, context=None): return str(value) def to_native(self, value, context=None): if isinstance(value, decimal.Decimal): return value if not isinstance(value, (str, bool)): value = str(value) try: value = decimal.Decimal(value) except (TypeError, decimal.InvalidOperation) as exc: raise ConversionError( self.messages["number_coerce"].format(value, self.number_type.lower()) ) from exc return value class HashType(StringType): MESSAGES = { "hash_length": _("Hash value is wrong length."), "hash_hex": _("Hash value is not hexadecimal."), } LENGTH = -1 def _mock(self, context=None): return random_string(self.LENGTH, string.hexdigits) def to_native(self, value, context=None): value = super().to_native(value, context) if len(value) != self.LENGTH: raise ValidationError(self.messages["hash_length"]) try: int(value, 
16) except ValueError as exc: raise ConversionError(self.messages["hash_hex"]) from exc return value class MD5Type(HashType): """A field that validates input as resembling an MD5 hash.""" LENGTH = 32 class SHA1Type(HashType): """A field that validates input as resembling an SHA1 hash.""" LENGTH = 40 class BooleanType(BaseType): """A boolean field type. In addition to ``True`` and ``False``, coerces these values: + For ``True``: "True", "true", "1" + For ``False``: "False", "false", "0" """ primitive_type = bool native_type = bool TRUE_VALUES = ("True", "true", "1") FALSE_VALUES = ("False", "false", "0") def __init__(self, **kwargs): """Create a bool field.""" super().__init__(**kwargs) def _mock(self, context=None): return random.choice([True, False]) def to_native(self, value, context=None): if isinstance(value, str): if value in self.TRUE_VALUES: value = True elif value in self.FALSE_VALUES: value = False elif isinstance(value, int) and value in [0, 1]: value = bool(value) if not isinstance(value, bool): raise ConversionError(_("Must be either true or false.")) return value class DateType(BaseType): """Defaults to converting to and from ISO8601 date values.""" primitive_type = str native_type = datetime.date SERIALIZED_FORMAT = "%Y-%m-%d" MESSAGES = { "parse": _("Could not parse {0}. Should be ISO 8601 (YYYY-MM-DD)."), "parse_formats": _("Could not parse {0}. 
Valid formats: {1}"), } def __init__(self, formats=None, **kwargs): """Create a datetime.date field.""" if formats: self.formats = listify(formats) self.conversion_errmsg = self.MESSAGES["parse_formats"] else: self.formats = ["%Y-%m-%d"] self.conversion_errmsg = self.MESSAGES["parse"] self.serialized_format = self.SERIALIZED_FORMAT super().__init__(**kwargs) def _mock(self, context=None): return datetime.date( year=random.randrange(600) + 1900, month=random.randrange(12) + 1, day=random.randrange(28) + 1, ) def to_native(self, value, context=None): if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value for fmt in self.formats: try: return datetime.datetime.strptime(value, fmt).date() except (ValueError, TypeError): continue raise ConversionError( self.conversion_errmsg.format(value, ", ".join(self.formats)) ) def to_primitive(self, value, context=None): return value.strftime(self.serialized_format) class DateTimeType(BaseType): """A field that holds a combined date and time value. The built-in parser accepts input values conforming to the ISO 8601 format ``<YYYY>-<MM>-<DD>T<hh>:<mm>[:<ss.ssssss>][<z>]``. A space may be substituted for the delimiter ``T``. The time zone designator ``<z>`` may be either ``Z`` or ``±<hh>[:][<mm>]``. Values are stored as standard ``datetime.datetime`` instances with the time zone offset in the ``tzinfo`` component if available. Raw values that do not specify a time zone will be converted to naive ``datetime`` objects unless ``tzd='utc'`` is in effect. Unix timestamps are also valid input values and will be converted to UTC datetimes. :param formats: (Optional) A value or iterable of values suitable as ``datetime.datetime.strptime`` format strings, for example ``('%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f')``. If the parameter is present, ``strptime()`` will be used for parsing instead of the built-in parser. 
:param serialized_format: The output format suitable for Python ``strftime``. Default: ``'%Y-%m-%dT%H:%M:%S.%f%z'`` :param parser: (Optional) An external function to use for parsing instead of the built-in parser. It should return a ``datetime.datetime`` instance. :param tzd: Sets the time zone policy. Default: ``'allow'`` ============== ====================================================================== ``'require'`` Values must specify a time zone. ``'allow'`` Values both with and without a time zone designator are allowed. ``'utc'`` Like ``allow``, but values with no time zone information are assumed to be in UTC. ``'reject'`` Values must not specify a time zone. This also prohibits timestamps. ============== ====================================================================== :param convert_tz: Indicates whether values with a time zone designator should be automatically converted to UTC. Default: ``False`` * ``True``: Convert the datetime to UTC based on its time zone offset. * ``False``: Don't convert. Keep the original time and offset intact. :param drop_tzinfo: Can be set to automatically remove the ``tzinfo`` objects. This option should generally be used in conjunction with the ``convert_tz`` option unless you only care about local wall clock times. Default: ``False`` * ``True``: Discard the ``tzinfo`` components and make naive ``datetime`` objects instead. * ``False``: Preserve the ``tzinfo`` components if present. """ primitive_type: Type[Any] = str native_type = datetime.datetime SERIALIZED_FORMAT = "%Y-%m-%dT%H:%M:%S.%f%z" MESSAGES = { "parse": _("Could not parse {0}. Should be ISO 8601 or timestamp."), "parse_formats": _("Could not parse {0}. Valid formats: {1}"), "parse_external": _("Could not parse {0}."), "parse_tzd_require": _("Could not parse {0}. Time zone offset required."), "parse_tzd_reject": _("Could not parse {0}. Time zone offset not allowed."), "tzd_require": _("Could not convert {0}. 
Time zone required but not found."), "tzd_reject": _("Could not convert {0}. Time zone offsets not allowed."), "validate_tzd_require": _("Time zone information required but not found."), "validate_tzd_reject": _("Time zone information not allowed."), "validate_utc_none": _("Time zone must be UTC but was None."), "validate_utc_wrong": _("Time zone must be UTC."), } REGEX = re.compile( r""" (?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)(?:T|\ ) (?P<hour>\d\d):(?P<minute>\d\d) (?::(?P<second>\d\d)(?:(?:\.|,)(?P<sec_frac>\d{1,6}))?)? (?:(?P<tzd_offset>(?P<tzd_sign>[+−-])(?P<tzd_hour>\d\d):?(?P<tzd_minute>\d\d)?) |(?P<tzd_utc>Z))?$""", re.X, ) TIMEDELTA_ZERO = datetime.timedelta(0) class fixed_timezone(datetime.tzinfo): def utcoffset(self, dt): return self.offset def fromutc(self, dt): return dt + self.offset def dst(self, dt): return None def tzname(self, dt): return self.str def __str__(self): return self.str def __repr__(self, info=""): return f"{type(self).__name__}({info})" class utc_timezone(fixed_timezone): offset = datetime.timedelta(0) name = str = "UTC" class offset_timezone(fixed_timezone): def __init__(self, hours=0, minutes=0): self.offset = datetime.timedelta(hours=hours, minutes=minutes) total_seconds = self.offset.days * 86400 + self.offset.seconds self.str = "{0:s}{1:02d}:{2:02d}".format( "+" if total_seconds >= 0 else "-", int(abs(total_seconds) / 3600), int(abs(total_seconds) % 3600 / 60), ) def __repr__(self): return DateTimeType.fixed_timezone.__repr__(self, self.str) UTC = utc_timezone() EPOCH = datetime.datetime(1970, 1, 1, tzinfo=UTC) def __init__( self, formats=None, serialized_format=None, parser=None, tzd="allow", convert_tz=False, drop_tzinfo=False, **kwargs, ): """Create a datetime.datetime field.""" if tzd not in ("require", "allow", "utc", "reject"): raise ValueError( "DateTimeType.__init__() got an invalid value for parameter 'tzd'" ) self.formats = listify(formats) self.serialized_format = serialized_format or self.SERIALIZED_FORMAT 
self.parser = parser self.tzd = tzd self.convert_tz = convert_tz self.drop_tzinfo = drop_tzinfo super().__init__(**kwargs) def _mock(self, context=None): dt = datetime.datetime( year=random.randrange(600) + 1900, month=random.randrange(12) + 1, day=random.randrange(28) + 1, hour=random.randrange(24), minute=random.randrange(60), second=random.randrange(60), microsecond=random.randrange(1000000), ) if ( self.tzd == "reject" or self.drop_tzinfo or self.tzd == "allow" and random.randrange(2) ): return dt if self.convert_tz: return dt.replace(tzinfo=self.UTC) return dt.replace( tzinfo=self.offset_timezone( hours=random.randrange(-12, 15), minutes=random.choice([0, 30, 45]) ) ) def to_native(self, value, context=None): if isinstance(value, datetime.datetime): if value.tzinfo is None: if not self.drop_tzinfo: if self.tzd == "require": raise ConversionError( self.messages["tzd_require"].format(value) ) if self.tzd == "utc": value = value.replace(tzinfo=self.UTC) else: if self.tzd == "reject": raise ConversionError(self.messages["tzd_reject"].format(value)) if self.convert_tz: value = value.astimezone(self.UTC) if self.drop_tzinfo: value = value.replace(tzinfo=None) return value if self.formats: # Delegate to datetime.datetime.strptime() using provided format strings. for fmt in self.formats: try: dt = datetime.datetime.strptime(value, fmt) break except (ValueError, TypeError): continue else: raise ConversionError( self.messages["parse_formats"].format( value, ", ".join(self.formats) ) ) elif self.parser: # Delegate to external parser. try: dt = self.parser(value) except Exception as exc: raise ConversionError( self.messages["parse_external"].format(value) ) from exc else: # Use built-in parser. 
try: value = float(value) except ValueError: dt = self.from_string(value) except TypeError as exc: raise ConversionError(self.messages["parse"].format(value)) from exc else: dt = self.from_timestamp(value) if not dt: raise ConversionError(self.messages["parse"].format(value)) if dt.tzinfo is None: if self.tzd == "require": raise ConversionError(self.messages["parse_tzd_require"].format(value)) if self.tzd == "utc" and not self.drop_tzinfo: dt = dt.replace(tzinfo=self.UTC) else: if self.tzd == "reject": raise ConversionError(self.messages["parse_tzd_reject"].format(value)) if self.convert_tz: dt = dt.astimezone(self.UTC) if self.drop_tzinfo: dt = dt.replace(tzinfo=None) return dt def from_string(self, value): match = self.REGEX.match(value) if not match: return None parts = dict(((k, v) for k, v in match.groupdict().items() if v is not None)) def p(name): return int(parts.get(name, 0)) microsecond = p("sec_frac") and p("sec_frac") * 10 ** ( 6 - len(parts["sec_frac"]) ) if "tzd_utc" in parts: tz = self.UTC elif "tzd_offset" in parts: tz_sign = 1 if parts["tzd_sign"] == "+" else -1 tz_offset = (p("tzd_hour") * 60 + p("tzd_minute")) * tz_sign if tz_offset == 0: tz = self.UTC else: tz = self.offset_timezone(minutes=tz_offset) else: tz = None try: return datetime.datetime( p("year"), p("month"), p("day"), p("hour"), p("minute"), p("second"), microsecond, tz, ) except (ValueError, TypeError): return None def from_timestamp(self, value): try: return datetime.datetime(1970, 1, 1, tzinfo=self.UTC) + datetime.timedelta( seconds=value ) except (ValueError, TypeError): return None def to_primitive(self, value, context=None): if callable(self.serialized_format): return self.serialized_format(value) return value.strftime(self.serialized_format) def validate_tz(self, value, context=None): if value.tzinfo is None: if not self.drop_tzinfo: if self.tzd == "require": raise ValidationError(self.messages["validate_tzd_require"]) if self.tzd == "utc": raise 
ValidationError(self.messages["validate_utc_none"]) else: if self.drop_tzinfo: raise ValidationError(self.messages["validate_tzd_reject"]) if self.tzd == "reject": raise ValidationError(self.messages["validate_tzd_reject"]) if self.convert_tz and value.tzinfo.utcoffset(value) != self.TIMEDELTA_ZERO: raise ValidationError(self.messages["validate_utc_wrong"]) class UTCDateTimeType(DateTimeType): """A variant of ``DateTimeType`` that normalizes everything to UTC and stores values as naive ``datetime`` instances. By default sets ``tzd='utc'``, ``convert_tz=True``, and ``drop_tzinfo=True``. The standard export format always includes the UTC time zone designator ``"Z"``. """ SERIALIZED_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" def __init__( self, formats=None, parser=None, tzd="utc", convert_tz=True, drop_tzinfo=True, **kwargs, ): """Create a datetime.datetime in UTC field.""" super().__init__( formats=formats, parser=parser, tzd=tzd, convert_tz=convert_tz, drop_tzinfo=drop_tzinfo, **kwargs, ) class TimestampType(DateTimeType): """A variant of ``DateTimeType`` that exports itself as a Unix timestamp instead of an ISO 8601 string. Always sets ``tzd='require'`` and ``convert_tz=True``. 
""" primitive_type = float def __init__(self, formats=None, parser=None, drop_tzinfo=False, **kwargs): """Create a datetime.datetime as a float field.""" super().__init__( formats=formats, parser=parser, tzd="require", convert_tz=True, drop_tzinfo=drop_tzinfo, **kwargs, ) def to_primitive(self, value, context=None): if value.tzinfo is None: value = value.replace(tzinfo=self.UTC) else: value = value.astimezone(self.UTC) delta = value - self.EPOCH return delta.total_seconds() class TimedeltaType(BaseType): """Converts Python Timedelta objects into the corresponding value in seconds.""" primitive_type = float native_type = datetime.timedelta MESSAGES = { "convert": _("Couldn't interpret '{0}' value as Timedelta."), } DAYS = "days" SECONDS = "seconds" MICROSECONDS = "microseconds" MILLISECONDS = "milliseconds" MINUTES = "minutes" HOURS = "hours" WEEKS = "weeks" def __init__(self, precision="seconds", **kwargs): """Create a datetime.timedelta field.""" precision = precision.lower() units = ( self.DAYS, self.SECONDS, self.MICROSECONDS, self.MILLISECONDS, self.MINUTES, self.HOURS, self.WEEKS, ) if precision not in units: raise ValueError( "TimedeltaType.__init__() got an invalid value for parameter 'precision'" ) self.precision = precision super().__init__(**kwargs) def _mock(self, context=None): return datetime.timedelta(seconds=random.random() * 1000) def to_native(self, value, context=None): if isinstance(value, datetime.timedelta): return value try: return datetime.timedelta(**{self.precision: float(value)}) except (ValueError, TypeError) as exc: raise ConversionError(self.messages["convert"].format(value)) from exc def to_primitive(self, value, context=None): base_unit = datetime.timedelta(**{self.precision: 1}) return int(value.total_seconds() / base_unit.total_seconds()) class GeoPointType(BaseType): """A list storing a latitude and longitude.""" primitive_type = list native_type = list MESSAGES = { "point_min": _("{0} value {1} should be greater than or equal to 
{2}."), "point_max": _("{0} value {1} should be less than or equal to {2}."), } def _mock(self, context=None): return (random.randrange(-90, 90), random.randrange(-180, 180)) @classmethod def _normalize(cls, value): if isinstance(value, dict): return list(value.values()) return list(value) def to_native(self, value, context=None): """Make sure that a geo-value is of type (x, y)""" if not isinstance(value, (tuple, list, dict)): raise ConversionError( _("GeoPointType can only accept tuples, lists, or dicts") ) elements = self._normalize(value) if not len(elements) == 2: raise ConversionError(_("Value must be a two-dimensional point")) if not all(isinstance(v, (float, int)) for v in elements): raise ConversionError(_("Both values in point must be float or int")) return value def validate_range(self, value, context=None): latitude, longitude = self._normalize(value) if latitude < -90: raise ValidationError( self.messages["point_min"].format("Latitude", latitude, "-90") ) if latitude > 90: raise ValidationError( self.messages["point_max"].format("Latitude", latitude, "90") ) if longitude < -180: raise ValidationError( self.messages["point_min"].format("Longitude", longitude, -180) ) if longitude > 180: raise ValidationError( self.messages["point_max"].format("Longitude", longitude, 180) ) class MultilingualStringType(BaseType): """ A multilanguage string field, stored as a dict with {'locale': 'localized_value'}. Minimum and maximum lengths apply to each of the localized values. At least one of ``default_locale`` or ``context.app_data['locale']`` must be defined when calling ``.to_primitive``. 
""" primitive_type = str native_type = str allow_casts = (int, bytes) MESSAGES = { "convert": _("Couldn't interpret value as string."), "max_length": _("String value in locale {0} is too long."), "min_length": _("String value in locale {0} is too short."), "locale_not_found": _("No requested locale was available."), "no_locale": _("No default or explicit locales were given."), "regex_locale": _("Name of locale {0} did not match validation regex."), "regex_localized": _( "String value in locale {0} did not match validation regex." ), } LOCALE_REGEX = r"^[a-z]{2}(:?_[A-Z]{2})?$" def __init__( self, regex=None, max_length=None, min_length=None, default_locale=None, locale_regex=LOCALE_REGEX, **kwargs, ): self.regex = re.compile(regex) if regex else None self.max_length = max_length self.min_length = min_length self.default_locale = default_locale self.locale_regex = re.compile(locale_regex) if locale_regex else None super().__init__(**kwargs) def _mock(self, context=None): return random_string(get_value_in(self.min_length, self.max_length)) def to_native(self, value, context=None): """Make sure a MultilingualStringType value is a dict or None.""" if not (value is None or isinstance(value, dict)): raise ConversionError(_("Value must be a dict or None")) return value def to_primitive(self, value, context=None): """ Use a combination of ``default_locale`` and ``context.app_data['locale']`` to return the best localized string. 
""" if value is None: return None context_locale = None if context and "locale" in context.app_data: context_locale = context.app_data["locale"] # Build a list of all possible locales to try possible_locales = [] for locale in (context_locale, self.default_locale): if not locale: continue if isinstance(locale, str): possible_locales.append(locale) else: possible_locales.extend(locale) if not possible_locales: raise ConversionError(self.messages["no_locale"]) for locale in possible_locales: if locale in value: localized = value[locale] break else: raise ConversionError(self.messages["locale_not_found"]) if not isinstance(localized, str): if isinstance(localized, self.allow_casts): if isinstance(localized, bytes): localized = str(localized, "utf-8") else: localized = str(localized) else: raise ConversionError(self.messages["convert"]) return localized def validate_length(self, value, context=None): for locale, localized in value.items(): len_of_value = len(localized) if localized else 0 if self.max_length is not None and len_of_value > self.max_length: raise ValidationError(self.messages["max_length"].format(locale)) if self.min_length is not None and len_of_value < self.min_length: raise ValidationError(self.messages["min_length"].format(locale)) def validate_regex(self, value, context=None): if self.regex is None and self.locale_regex is None: return for locale, localized in value.items(): if self.regex is not None and self.regex.match(localized) is None: raise ValidationError(self.messages["regex_localized"].format(locale)) if ( self.locale_regex is not None and self.locale_regex.match(locale) is None ): raise ValidationError(self.messages["regex_locale"].format(locale))
/schematics-py310-plus-0.0.4.tar.gz/schematics-py310-plus-0.0.4/schematics/types/base.py
0.771715
0.195479
base.py
pypi
"""Translate schematics Model classes into Swagger/OpenAPI definitions."""
import inspect
from collections import namedtuple

from schematics import models
from schematics import types

name = 'schematics_to_swagger'

DEFAULT_SWAGGER_VERSION = 2

# name: the Swagger property key; func: converts the schematics attribute
# value (defaults to identity).
SwaggerProp = namedtuple('SwaggerProp', ['name', 'func'], defaults=(None, lambda x: x))

# schematics field attribute -> Swagger property it maps to.
_KNOWN_PROPS = {
    'max_length': SwaggerProp('maxLength'),
    'min_length': SwaggerProp('minLength'),
    'min_value': SwaggerProp('minimum'),
    'max_value': SwaggerProp('maximum'),
    'regex': SwaggerProp('pattern', lambda x: x.pattern),
    'choices': SwaggerProp('enum'),
}


def _map_type_properties(t):
    """Collect known validation attributes plus raw metadata from field *t*."""
    props = {}
    for attr, spec in _KNOWN_PROPS.items():
        raw = getattr(t, attr, None)
        if raw:  # falsy values (None, 0, '') are treated as "not set"
            props[spec.name] = spec.func(raw)
    # metadata entries pass straight through to the Swagger property dict
    props.update(t.metadata)
    return props


def _scalar(swagger_type, swagger_format=None):
    """Build a mapper emitting {'type': ...[, 'format': ...]} plus extras."""
    if swagger_format is None:
        return lambda t: dict(type=swagger_type, **_map_type_properties(t))
    return lambda t: dict(
        type=swagger_type, format=swagger_format, **_map_type_properties(t)
    )


# schematics field class -> callable producing its Swagger property schema.
_DATATYPES = {
    # Base types
    types.BooleanType: _scalar('boolean'),
    types.IntType: _scalar('integer', 'int32'),
    types.LongType: _scalar('integer', 'int64'),
    types.FloatType: _scalar('number', 'float'),
    types.DecimalType: _scalar('number', 'double'),
    types.StringType: _scalar('string'),
    types.UUIDType: _scalar('string', 'uuid'),
    types.MD5Type: _scalar('string', 'md5'),
    types.SHA1Type: _scalar('string', 'sha1'),
    types.DateType: _scalar('string', 'date'),
    types.DateTimeType: _scalar('string', 'date-time'),

    # Net types
    types.EmailType: _scalar('string', 'email'),
    types.URLType: _scalar('string', 'uri'),

    # Compound types
    types.ModelType: lambda t: dict(
        {'$ref': '#/definitions/%s' % t.model_name}, **_map_type_properties(t)
    ),
    types.ListType: lambda t: dict(
        type='array', items=_map_schematics_type(t.field), **_map_type_properties(t)
    ),
}


def _map_schematics_type(t):
    """Translate one schematics field instance into a Swagger property dict."""
    mapper = _DATATYPES.get(t.__class__)
    if mapper is not None:
        return mapper(t)
    return None  # unknown field classes are silently skipped


def model_to_definition(model):
    """Build a Swagger definition object for the schematics Model *model*."""
    private_prefix = f'_{model.__name__}'
    properties = {}
    required = []
    for field_name, field in model.fields.items():
        if field_name.startswith(private_prefix):
            continue  # private fields are not exported
        properties[field_name] = _map_schematics_type(field)
        if getattr(field, 'required'):
            required.append(field_name)

    definition = {
        'type': 'object',
        'title': model.__name__,
        'description': model.__doc__,
        'properties': properties
    }
    if required:
        definition['required'] = required
    return definition


def _build_model_type_v3(t):
    """OpenAPI 3 mapper for ModelType fields."""
    ref = {'$ref': f'#/components/schemas/{t.model_name}'}
    extras = _map_type_properties(t)
    # Keys placed next to $ref are ignored in OpenAPI 3, so wrap the
    # reference in allOf whenever metadata must travel alongside it.
    wrap_in_all_of = t.metadata and not getattr(t, 'required', False)
    if wrap_in_all_of:
        return {'allOf': [ref], **extras}
    return {**ref, **extras}


def version_dependencies(version):
    """Adjust the type-mapping table for the requested Swagger version."""
    if version == 3:
        # OpenAPI 3 moves model schemas under #/components/schemas.
        _DATATYPES[types.ModelType] = _build_model_type_v3


def read_models_from_module(module, version=DEFAULT_SWAGGER_VERSION):
    """Collect Swagger definitions for every Model class declared in *module*."""
    version_dependencies(version)
    definitions = {}
    for attr_name in dir(module):
        if attr_name.startswith('_'):
            continue  # Skip private stuff
        candidate = getattr(module, attr_name)
        is_local_model = (
            inspect.isclass(candidate)
            and issubclass(candidate, models.Model)
            and candidate.__module__ == module.__name__
        )
        if is_local_model:
            definitions[attr_name] = model_to_definition(candidate)
    return definitions
/schematics_to_swagger-1.4.7-py3-none-any.whl/schematics_to_swagger/__init__.py
0.530236
0.332608
__init__.py
pypi
"""XML (de)serialization support for schematics models."""
import collections
import numbers

import lxml.builder
import lxml.etree

from schematics import Model
from schematics.types import BaseType, ModelType, CompoundType, ListType, DictType
from schematics.types.base import MultilingualStringType
from schematics.types.compound import PolyModelType
from xmltodict import parse


class XMLModel(Model):
    """
    A model that can convert it's fields to and from XML.
    """

    @property
    def xml_root(self) -> str:
        """
        Override this attribute to set the XML root returned by :py:meth:`.XMLModel.to_xml`.

        Defaults to the lowercased class name.
        """
        return type(self).__name__.lower()

    #: Override this attribute to set the encoding specified in the XML returned by :py:meth:`.XMLModel.to_xml`.
    xml_encoding = 'UTF-8'

    def to_xml(self, role: str=None, app_data: dict=None, encoding: str=None, **kwargs) -> str:
        """
        Return a string of XML that represents this model.

        Currently all arguments are passed through to schematics.Model.to_primitive.

        :param role: schematics Model to_primitive role parameter.
        :param app_data: schematics Model to_primitive app_data parameter.
        :param encoding: xml encoding attribute string.
        :param kwargs: schematics Model to_primitive kwargs parameter.
        """
        # Serialize via schematics first, then convert the primitive dict
        # to an lxml element tree.
        primitive = self.to_primitive(role=role, app_data=app_data, **kwargs)
        root = self.primitive_to_xml(primitive)
        return lxml.etree.tostring(  # pylint: disable=no-member
            root,
            pretty_print=True,
            xml_declaration=True,
            encoding=encoding or self.xml_encoding
        )

    def primitive_to_xml(self, primitive: dict, parent: 'lxml.etree._Element'=None):
        """Convert a primitive dict into XML elements appended to *parent*.

        When *parent* is None a fresh root element named :py:attr:`xml_root`
        is created and returned.
        """
        element_maker = lxml.builder.ElementMaker()
        if parent is None:
            parent = getattr(element_maker, self.xml_root)()
        for key, value in primitive.items():
            self.primitive_value_to_xml(key, parent, value)
        return parent

    def primitive_value_to_xml(self, key, parent, value):
        """Append one primitive *value* to *parent* as a ``<key>`` element.

        Booleans become '1'/'0', numbers and strings are stringified,
        None becomes an empty element, dicts recurse, and other iterables
        emit one sibling ``<key>`` element per item.

        :raises TypeError: for values of any other type.
        """
        element_maker = lxml.builder.ElementMaker()
        # bool must be tested before numbers.Number: bool is a Number subclass.
        if isinstance(value, bool):
            parent.append(getattr(element_maker, key)('1' if value else '0'))
        elif isinstance(value, numbers.Number) or isinstance(value, str):
            parent.append(getattr(element_maker, key)(str(value)))
        elif value is None:
            parent.append(getattr(element_maker, key)(''))
        elif isinstance(value, dict):
            _parent = getattr(element_maker, key)()
            parent.append(self.primitive_to_xml(value, _parent))
        elif isinstance(value, collections.abc.Iterable):
            # Lists have no dedicated wrapper element; each item becomes a
            # repeated <key> element under the same parent.
            for _value in value:
                self.primitive_value_to_xml(key, parent, _value)
        else:
            raise TypeError('Unsupported data type: %s (%s)' % (value, type(value).__name__))

    @classmethod
    def from_xml(cls, xml: str) -> Model:
        """
        Convert XML into a model.

        :param xml: A string of XML that represents this Model.
        :raises NotImplementedError: if the model contains a
            MultilingualStringType field, or the XML does not have exactly
            one root element.
        """
        if model_has_field_type(MultilingualStringType, cls):
            raise NotImplementedError("Field type 'MultilingualStringType' is not supported.")

        primitive = parse(xml)

        # Exactly one root element is expected; its children are the raw data.
        if len(primitive) != 1:
            raise NotImplementedError

        for _, raw_data in primitive.items():
            if model_has_field_type(ListType, cls):
                # We need to ensure that single item lists are actually lists and not dicts
                raw_data = ensure_lists_in_model(raw_data, cls)

        return cls(raw_data=raw_data)


def model_has_field_type(needle: BaseType, haystack: Model) -> bool:
    """
    Return True if haystack contains a field of type needle.

    Iterates over all fields (and into field if appropriate) and searches for field
    type *needle* in model *haystack*.

    :param needle: A schematics field class to search for.
    :param haystack: A schematics model to search within.
    """
    for _, field in haystack._field_list:  # pylint: disable=protected-access
        if field_has_type(needle, field):
            return True
    return False


def field_has_type(needle: BaseType, field: BaseType) -> bool:  # pylint: disable=too-many-return-statements, too-many-branches
    """
    Return True if field haystack contains a field of type needle.

    :param needle: A schematics field class to search for.
    :param field: An instance of a schematics field within a model.
    """
    # Branch order matters: ModelType and PolyModelType are themselves
    # CompoundType subclasses, so they must be tested first.
    if isinstance(field, needle):
        return True
    elif isinstance(field, ModelType):
        # Recurse into the nested model's fields.
        if model_has_field_type(needle, field.model_class):
            return True
    elif isinstance(field, PolyModelType):
        if needle in [type(obj) for obj in field.model_classes]:
            return True
        for obj in [obj for obj in field.model_classes if isinstance(obj, ModelType)]:
            if model_has_field_type(needle, obj.model_class):
                return True
    elif isinstance(field, CompoundType):
        # e.g. ListType/DictType wrapping another field.
        if needle == type(field.field):
            return True
        # try/except/else: the else block only runs when field.model_class
        # exists (no AttributeError was raised) and did not match needle.
        try:
            if needle == field.model_class:
                return True
        except AttributeError:
            pass
        else:
            if model_has_field_type(needle, field.model_class):
                return True
        # Finally recurse into the wrapped field itself.
        if field_has_type(needle, field.field):
            return True
    return False


def ensure_lists_in_model(raw_data: dict, model_cls: XMLModel):
    """
    Ensure that single item lists are represented as lists and not dicts.

    In XML single item lists are converted to dicts by xmltodict - there
    is essentially no way for xmltodict to know that it *should* be a
    list not a dict.

    :param raw_data: Parsed XML data (as produced by xmltodict).
    :param model_cls: The model class describing the expected structure.
    """
    if not model_has_field_type(ListType, model_cls):
        return raw_data

    for _, field in model_cls._field_list:  # pylint: disable=protected-access
        # Honour serialized_name overrides when looking up the raw key.
        key = field.serialized_name or field.name
        try:
            value = raw_data[key]
        except KeyError:
            continue  # field absent from the XML; nothing to fix
        raw_data[key] = ensure_lists_in_value(value, field)

    return raw_data


def ensure_lists_in_value(value: 'typing.Any', field: BaseType):
    """Recursively wrap single-item list values so they are real lists.

    :param value: The raw parsed value for *field*.
    :param field: The schematics field describing the expected type.
    """
    if value is None:
        # Don't turn None items into a list of None items
        return None
    if isinstance(field, ListType):
        if not isinstance(value, list):
            # xmltodict collapsed a one-element list into a scalar/dict.
            value = [
                ensure_lists_in_value(value, field.field)
            ]
        elif field_has_type(ListType, field.field):
            # Nested lists: fix each element against the inner field.
            value = [
                ensure_lists_in_value(_value, field.field)
                for _value in value
            ]
    elif field_has_type(ListType, field):
        if isinstance(field, DictType):
            for _key, _value in value.items():
                value[_key] = ensure_lists_in_value(_value, field.field)
        elif isinstance(field, ModelType):
            value = ensure_lists_in_model(value, field.model_class)
    return value
/schematics-xml-0.2.1.tar.gz/schematics-xml-0.2.1/schematics_xml/models.py
0.727685
0.298798
models.py
pypi
2.1.1 / 2021-08-17 ================== - Update error message for incorrect choices field `#572 <https://github.com/schematics/schematics/pull/572>`__ (`begor <https://github.com/begor>`__) - Avoid some deprecation warnings when using Python 3 `#576 <https://github.com/schematics/schematics/pull/576>`__ (`jesuslosada <https://github.com/jesuslosada>`__) - Fix EnumType enums with value=0 not working with use_values=True `#594 <https://github.com/schematics/schematics/pull/594>`__ (`nikhilgupta345 <https://github.com/nikhilgupta345>`__) - Fix syntax warning over comparison of literals using is. `#611 <https://github.com/schematics/schematics/pull/611>`__ (`tirkarthi <https://github.com/tirkarthi>`__) - Add help text generation capability to Models `#543 <https://github.com/schematics/schematics/pull/543>`__ (`MartinHowarth <https://github.com/MartinHowarth>`__) - Update documentation `#578 <https://github.com/schematics/schematics/pull/578>`__ (`BobDu <https://github.com/BobDu>`__) `#604 <https://github.com/schematics/schematics/pull/604>`__ (`BryanChan777 <https://github.com/BryanChan777>`__) `#605 <https://github.com/schematics/schematics/pull/605>`__ (`timgates42 <https://github.com/timgates42>`__) `#608 <https://github.com/schematics/schematics/pull/608>`__ (`dasubermanmind <https://github.com/dasubermanmind>`__) - Add test coverage for model validation inside Dict/List `#588 <https://github.com/schematics/schematics/pull/588>`__ (`borgstrom <https://github.com/borgstrom>`__) - Added German translation `#614 <https://github.com/schematics/schematics/pull/614>`__ (`hkage <https://github.com/hkage>`__) 2.1.0 / 2018-06-25 ================== **[BREAKING CHANGE]** - Drop Python 2.6 support `#517 <https://github.com/schematics/schematics/pull/517>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) Other changes: - Add TimedeltaType `#540 <https://github.com/schematics/schematics/pull/540>`__ (`gabisurita <https://github.com/gabisurita>`__) - Allow to create 
Model fields dynamically `#512 <https://github.com/schematics/schematics/pull/512>`__ (`lkraider <https://github.com/lkraider>`__) - Allow ModelOptions to have extra parameters `#449 <https://github.com/schematics/schematics/pull/449>`__ (`rmb938 <https://github.com/rmb938>`__) `#506 <https://github.com/schematics/schematics/pull/506>`__ (`ekampf <https://github.com/ekampf>`__) - Accept callables as serialize roles `#508 <https://github.com/schematics/schematics/pull/508>`__ (`lkraider <https://github.com/lkraider>`__) (`jaysonsantos <https://github.com/jaysonsantos>`__) - Simplify PolyModelType.find_model for readability `#537 <https://github.com/schematics/schematics/pull/537>`__ (`kstrauser <https://github.com/kstrauser>`__) - Enable PolyModelType recursive validation `#535 <https://github.com/schematics/schematics/pull/535>`__ (`javiertejero <https://github.com/javiertejero>`__) - Documentation fixes `#509 <https://github.com/schematics/schematics/pull/509>`__ (`Tuoris <https://github.com/Tuoris>`__) `#514 <https://github.com/schematics/schematics/pull/514>`__ (`tommyzli <https://github.com/tommyzli>`__) `#518 <https://github.com/schematics/schematics/pull/518>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#546 <https://github.com/schematics/schematics/pull/546>`__ (`harveyslash <https://github.com/harveyslash>`__) - Fix Model.init validation when partial is True `#531 <https://github.com/schematics/schematics/issues/531>`__ (`lkraider <https://github.com/lkraider>`__) - Minor number types refactor and mocking fixes `#519 <https://github.com/schematics/schematics/pull/519>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#520 <https://github.com/schematics/schematics/pull/520>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) - Add ability to import models as strings `#496 <https://github.com/schematics/schematics/pull/496>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) - Add EnumType `#504 
<https://github.com/schematics/schematics/pull/504>`__ (`ekamil <https://github.com/ekamil>`__) - Dynamic models: Possible memory issues because of _subclasses `#502 <https://github.com/schematics/schematics/pull/502>`__ (`mjrk <https://github.com/mjrk>`__) - Add type hints to constructors of field type classes `#488 <https://github.com/schematics/schematics/pull/488>`__ (`KonishchevDmitry <https://github.com/KonishchevDmitry>`__) - Regression: Do not call field validator if field has not been set `#499 <https://github.com/schematics/schematics/pull/499>`__ (`cmonfort <https://github.com/cmonfort>`__) - Add possibility to translate strings and add initial pt_BR translations `#495 <https://github.com/schematics/schematics/pull/495>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.1 / 2017-05-30 ================== - Support for raising DataError inside custom validate_fieldname methods. `#441 <https://github.com/schematics/schematics/pull/441>`__ (`alexhayes <https://github.com/alexhayes>`__) - Add specialized SchematicsDeprecationWarning. (`lkraider <https://github.com/lkraider>`__) - DateTimeType to_native method should handle type errors gracefully. `#491 <https://github.com/schematics/schematics/pull/491>`__ (`e271828- <https://github.com/e271828->`__) - Allow fields names to override the mapping-interface methods. `#489 <https://github.com/schematics/schematics/pull/489>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.0 / 2017-05-22 ================== **[BREAKING CHANGE]** Version 2.0 introduces many API changes, and it is not fully backwards-compatible with 1.x code. 
`Full Changelog <https://github.com/schematics/schematics/compare/v1.1.2...v2.0.0>`_ - Add syntax highlighting to README examples `#486 <https://github.com/schematics/schematics/pull/486>`__ (`gabisurita <https://github.com/gabisurita>`__) - Encode Unsafe data state in Model `#484 <https://github.com/schematics/schematics/pull/484>`__ (`lkraider <https://github.com/lkraider>`__) - Add MACAddressType `#482 <https://github.com/schematics/schematics/pull/482>`__ (`aleksej-paschenko <https://github.com/aleksej-paschenko>`__) 2.0.0.b1 / 2017-04-06 ===================== - Enhancing and addressing some issues around exceptions: `#477 <https://github.com/schematics/schematics/pull/477>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) - Allow primitive and native types to be inspected `#431 <https://github.com/schematics/schematics/pull/431>`__ (`chadrik <https://github.com/chadrik>`__) - Atoms iterator performance improvement `#476 <https://github.com/schematics/schematics/pull/476>`__ (`vovanbo <https://github.com/vovanbo>`__) - Fixes 453: Recursive import\_loop with ListType `#475 <https://github.com/schematics/schematics/pull/475>`__ (`lkraider <https://github.com/lkraider>`__) - Schema API `#466 <https://github.com/schematics/schematics/pull/466>`__ (`lkraider <https://github.com/lkraider>`__) - Tweak code example to avoid sql injection `#462 <https://github.com/schematics/schematics/pull/462>`__ (`Ian-Foote <https://github.com/Ian-Foote>`__) - Convert readthedocs links for their .org -> .io migration for hosted projects `#454 <https://github.com/schematics/schematics/pull/454>`__ (`adamchainz <https://github.com/adamchainz>`__) - Support all non-string Iterables as choices (dev branch) `#436 <https://github.com/schematics/schematics/pull/436>`__ (`di <https://github.com/di>`__) - When testing if a values is None or Undefined, use 'is'. 
`#425 <https://github.com/schematics/schematics/pull/425>`__ (`chadrik <https://github.com/chadrik>`__) 2.0.0a1 / 2016-05-03 ==================== - Restore v1 to\_native behavior; simplify converter code `#412 <https://github.com/schematics/schematics/pull/412>`__ (`bintoro <https://github.com/bintoro>`__) - Change conversion rules for booleans `#407 <https://github.com/schematics/schematics/pull/407>`__ (`bintoro <https://github.com/bintoro>`__) - Test for Model.\_\_init\_\_ context passing to types `#399 <https://github.com/schematics/schematics/pull/399>`__ (`sheilatron <https://github.com/sheilatron>`__) - Code normalization for Python 3 + general cleanup `#391 <https://github.com/schematics/schematics/pull/391>`__ (`bintoro <https://github.com/bintoro>`__) - Add support for arbitrary field metadata. `#390 <https://github.com/schematics/schematics/pull/390>`__ (`chadrik <https://github.com/chadrik>`__) - Introduce MixedType `#380 <https://github.com/schematics/schematics/pull/380>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev2 / 2016-02-06 ======================= - Type maintenance `#383 <https://github.com/schematics/schematics/pull/383>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev1 / 2016-02-01 ======================= - Performance optimizations `#378 <https://github.com/schematics/schematics/pull/378>`__ (`bintoro <https://github.com/bintoro>`__) - Validation refactoring + exception redesign `#374 <https://github.com/schematics/schematics/pull/374>`__ (`bintoro <https://github.com/bintoro>`__) - Fix typo: serilaizataion --> serialization `#373 <https://github.com/schematics/schematics/pull/373>`__ (`jeffwidman <https://github.com/jeffwidman>`__) - Add support for undefined values `#372 <https://github.com/schematics/schematics/pull/372>`__ (`bintoro <https://github.com/bintoro>`__) - Serializable improvements `#371 <https://github.com/schematics/schematics/pull/371>`__ (`bintoro <https://github.com/bintoro>`__) - Unify import/export 
interface across all types `#368 <https://github.com/schematics/schematics/pull/368>`__ (`bintoro <https://github.com/bintoro>`__) - Correctly decode bytestrings in Python 3 `#365 <https://github.com/schematics/schematics/pull/365>`__ (`bintoro <https://github.com/bintoro>`__) - Fix NumberType.to\_native() `#364 <https://github.com/schematics/schematics/pull/364>`__ (`bintoro <https://github.com/bintoro>`__) - Make sure field.validate() uses a native type `#363 <https://github.com/schematics/schematics/pull/363>`__ (`bintoro <https://github.com/bintoro>`__) - Don't validate ListType items twice `#362 <https://github.com/schematics/schematics/pull/362>`__ (`bintoro <https://github.com/bintoro>`__) - Collect field validators as bound methods `#361 <https://github.com/schematics/schematics/pull/361>`__ (`bintoro <https://github.com/bintoro>`__) - Propagate environment during recursive import/export/validation `#359 <https://github.com/schematics/schematics/pull/359>`__ (`bintoro <https://github.com/bintoro>`__) - DateTimeType & TimestampType major rewrite `#358 <https://github.com/schematics/schematics/pull/358>`__ (`bintoro <https://github.com/bintoro>`__) - Always export empty compound objects as {} / [] `#351 <https://github.com/schematics/schematics/pull/351>`__ (`bintoro <https://github.com/bintoro>`__) - export\_loop cleanup `#350 <https://github.com/schematics/schematics/pull/350>`__ (`bintoro <https://github.com/bintoro>`__) - Fix FieldDescriptor.\_\_delete\_\_ to not touch model `#349 <https://github.com/schematics/schematics/pull/349>`__ (`bintoro <https://github.com/bintoro>`__) - Add validation method for latitude and longitude ranges in GeoPointType `#347 <https://github.com/schematics/schematics/pull/347>`__ (`wraziens <https://github.com/wraziens>`__) - Fix longitude values for GeoPointType mock and add tests `#344 <https://github.com/schematics/schematics/pull/344>`__ (`wraziens <https://github.com/wraziens>`__) - Add support for self-referential 
ModelType fields `#335 <https://github.com/schematics/schematics/pull/335>`__ (`bintoro <https://github.com/bintoro>`__) - avoid unnecessary code path through try/except `#327 <https://github.com/schematics/schematics/pull/327>`__ (`scavpy <https://github.com/scavpy>`__) - Get mock object for ModelType and ListType `#306 <https://github.com/schematics/schematics/pull/306>`__ (`kaiix <https://github.com/kaiix>`__) 1.1.3 / 2017-06-27 ================== * [Maintenance] (`#501 <https://github.com/schematics/schematics/issues/501>`_) Dynamic models: Possible memory issues because of _subclasses 1.1.2 / 2017-03-27 ================== * [Bug] (`#478 <https://github.com/schematics/schematics/pull/478>`_) Fix dangerous performance issue with ModelConversionError in nested models 1.1.1 / 2015-11-03 ================== * [Bug] (`befa202 <https://github.com/schematics/schematics/commit/befa202c3b3202aca89fb7ef985bdca06f9da37c>`_) Fix Unicode issue with DecimalType * [Documentation] (`41157a1 <https://github.com/schematics/schematics/commit/41157a13896bd32a337c5503c04c5e9cc30ba4c7>`_) Documentation overhaul * [Bug] (`860d717 <https://github.com/schematics/schematics/commit/860d71778421981f284c0612aec665ebf0cfcba2>`_) Fix import that was negatively affecting performance * [Feature] (`93b554f <https://github.com/schematics/schematics/commit/93b554fd6a4e7b38133c4da5592b1843101792f0>`_) Add DataObject to datastructures.py * [Bug] (`#236 <https://github.com/schematics/schematics/pull/236>`_) Set `None` on a field that's a compound type should honour that semantics * [Maintenance] (`#348 <https://github.com/schematics/schematics/pull/348>`_) Update requirements * [Maintenance] (`#346 <https://github.com/schematics/schematics/pull/346>`_) Combining Requirements * [Maintenance] (`#342 <https://github.com/schematics/schematics/pull/342>`_) Remove to_primitive() method from compound types * [Bug] (`#339 <https://github.com/schematics/schematics/pull/339>`_) Basic number validation * [Bug] 
(`#336 <https://github.com/schematics/schematics/pull/336>`_) Don't evaluate serializable when accessed through class * [Bug] (`#321 <https://github.com/schematics/schematics/pull/321>`_) Do not compile regex * [Maintenance] (`#319 <https://github.com/schematics/schematics/pull/319>`_) Remove mock from install_requires 1.1.0 / 2015-07-12 ================== * [Feature] (`#303 <https://github.com/schematics/schematics/pull/303>`_) fix ListType, validate_items adds to errors list just field name without... * [Feature] (`#304 <https://github.com/schematics/schematics/pull/304>`_) Include Partial Data when Raising ModelConversionError * [Feature] (`#305 <https://github.com/schematics/schematics/pull/305>`_) Updated domain verifications to fit to RFC/working standards * [Feature] (`#308 <https://github.com/schematics/schematics/pull/308>`_) Grennady ordered validation * [Feature] (`#309 <https://github.com/schematics/schematics/pull/309>`_) improves date_time_type error message for custom formats * [Feature] (`#310 <https://github.com/schematics/schematics/pull/310>`_) accept optional 'Z' suffix for UTC date_time_type format * [Feature] (`#311 <https://github.com/schematics/schematics/pull/311>`_) Remove commented lines from models.py * [Feature] (`#230 <https://github.com/schematics/schematics/pull/230>`_) Message normalization 1.0.4 / 2015-04-13 ================== * [Example] (`#286 <https://github.com/schematics/schematics/pull/286>`_) Add schematics usage with Django * [Feature] (`#292 <https://github.com/schematics/schematics/pull/292>`_) increase domain length to 10 for .holiday, .vacations * [Feature] (`#297 <https://github.com/schematics/schematics/pull/297>`_) Support for fields order in serialized format * [Feature] (`#300 <https://github.com/schematics/schematics/pull/300>`_) increase domain length to 32 1.0.3 / 2015-03-07 ================== * [Feature] (`#284 <https://github.com/schematics/schematics/pull/284>`_) Add missing requirement for `six` * [Feature] 
(`#283 <https://github.com/schematics/schematics/pull/283>`_) Update error msgs to print out invalid values in base.py * [Feature] (`#281 <https://github.com/schematics/schematics/pull/281>`_) Update Model.__eq__ * [Feature] (`#267 <https://github.com/schematics/schematics/pull/267>`_) Type choices should be list or tuple 1.0.2 / 2015-02-12 ================== * [Bug] (`#280 <https://github.com/schematics/schematics/issues/280>`_) Fix the circular import issue. 1.0.1 / 2015-02-01 ================== * [Feature] (`#184 <https://github.com/schematics/schematics/issues/184>`_ / `03b2fd9 <https://github.com/schematics/schematics/commit/03b2fd97fb47c00e8d667cc8ea7254cc64d0f0a0>`_) Support for polymorphic model fields * [Bug] (`#233 <https://github.com/schematics/schematics/pull/233>`_) Set field.owner_model recursively and honor ListType.field.serialize_when_none * [Bug](`#252 <https://github.com/schematics/schematics/pull/252>`_) Fixed project URL * [Feature] (`#259 <https://github.com/schematics/schematics/pull/259>`_) Give export loop to serializable when type has one * [Feature] (`#262 <https://github.com/schematics/schematics/pull/262>`_) Make copies of inherited meta attributes when setting up a Model * [Documentation] (`#276 <https://github.com/schematics/schematics/pull/276>`_) Improve the documentation of get_mock_object 1.0.0 / 2014-10-16 ================== * [Documentation] (`#239 <https://github.com/schematics/schematics/issues/239>`_) Fix typo with wording suggestion * [Documentation] (`#244 <https://github.com/schematics/schematics/issues/244>`_) fix wrong reference in docs * [Documentation] (`#246 <https://github.com/schematics/schematics/issues/246>`_) Using the correct function name in the docstring * [Documentation] (`#245 <https://github.com/schematics/schematics/issues/245>`_) Making the docstring match actual parameter names * [Feature] (`#241 <https://github.com/schematics/schematics/issues/241>`_) Py3k support 0.9.5 / 2014-07-19 ================== * 
[Feature] (`#191 <https://github.com/schematics/schematics/pull/191>`_) Updated import_data to avoid overwriting existing data. deserialize_mapping can now support partial and nested models. * [Documentation] (`#192 <https://github.com/schematics/schematics/pull/192>`_) Document the creation of custom types * [Feature] (`#193 <https://github.com/schematics/schematics/pull/193>`_) Add primitive types accepting values of any simple or compound primitive JSON type. * [Bug] (`#194 <https://github.com/schematics/schematics/pull/194>`_) Change standard coerce_key function to unicode * [Tests] (`#196 <https://github.com/schematics/schematics/pull/196>`_) Test fixes and cleanup * [Feature] (`#197 <https://github.com/schematics/schematics/pull/197>`_) Giving context to serialization * [Bug] (`#198 <https://github.com/schematics/schematics/pull/198>`_) Fixed typo in variable name in DateTimeType * [Feature] (`#200 <https://github.com/schematics/schematics/pull/200>`_) Added the option to turn off strict conversion when creating a Model from a dict * [Feature] (`#212 <https://github.com/schematics/schematics/pull/212>`_) Support exporting ModelType fields with subclassed model instances * [Feature] (`#214 <https://github.com/schematics/schematics/pull/214>`_) Create mock objects using a class's fields as a template * [Bug] (`#215 <https://github.com/schematics/schematics/pull/215>`_) PEP 8 FTW * [Feature] (`#216 <https://github.com/schematics/schematics/pull/216>`_) Datastructures cleanup * [Feature] (`#217 <https://github.com/schematics/schematics/pull/217>`_) Models cleanup pt 1 * [Feature] (`#218 <https://github.com/schematics/schematics/pull/218>`_) Models cleanup pt 2 * [Feature] (`#219 <https://github.com/schematics/schematics/pull/219>`_) Mongo cleanup * [Feature] (`#220 <https://github.com/schematics/schematics/pull/220>`_) Temporal cleanup * [Feature] (`#221 <https://github.com/schematics/schematics/pull/221>`_) Base cleanup * [Feature] (`#224
<https://github.com/schematics/schematics/pull/224>`_) Exceptions cleanup * [Feature] (`#225 <https://github.com/schematics/schematics/pull/225>`_) Validate cleanup * [Feature] (`#226 <https://github.com/schematics/schematics/pull/226>`_) Serializable cleanup * [Feature] (`#227 <https://github.com/schematics/schematics/pull/227>`_) Transforms cleanup * [Feature] (`#228 <https://github.com/schematics/schematics/pull/228>`_) Compound cleanup * [Feature] (`#229 <https://github.com/schematics/schematics/pull/229>`_) UUID cleanup * [Feature] (`#231 <https://github.com/schematics/schematics/pull/231>`_) Booleans as numbers 0.9.4 / 2013-12-08 ================== * [Feature] (`#178 <https://github.com/schematics/schematics/pull/178>`_) Added deserialize_from flag to BaseType for alternate field names on import * [Bug] (`#186 <https://github.com/schematics/schematics/pull/186>`_) Compoundtype support in ListTypes * [Bug] (`#181 <https://github.com/schematics/schematics/pull/181>`_) Removed that stupid print statement! * [Feature] (`#182 <https://github.com/schematics/schematics/pull/182>`_) Default roles system * [Documentation] (`#190 <https://github.com/schematics/schematics/pull/190>`_) Typos * [Bug] (`#177 <https://github.com/schematics/schematics/pull/177>`_) Removed `__iter__` from ModelMeta * [Documentation] (`#188 <https://github.com/schematics/schematics/pull/188>`_) Typos 0.9.3 / 2013-10-20 ================== * [Documentation] More improvements * [Feature] (`#147 <https://github.com/schematics/schematics/pull/147>`_) Complete conversion over to py.test * [Bug] (`#176 <https://github.com/schematics/schematics/pull/176>`_) Fixed bug preventing clean override of options class * [Bug] (`#174 <https://github.com/schematics/schematics/pull/174>`_) Python 2.6 support 0.9.2 / 2013-09-13 ================== * [Documentation] New History file! 
* [Documentation] Major improvements to documentation * [Feature] Renamed ``check_value`` to ``validate_range`` * [Feature] Changed ``serialize`` to ``to_native`` * [Bug] (`#155 <https://github.com/schematics/schematics/pull/155>`_) NumberType number range validation bugfix
/schematics-2.1.1.tar.gz/schematics-2.1.1/HISTORY.rst
0.787278
0.657799
HISTORY.rst
pypi
Digital Logic ============= .. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import logic Logic gates can be drawn by importing the :py:mod:`schemdraw.logic.logic` module: .. code-block:: python from schemdraw import logic Logic gates are shown below. Gates define anchors for `out` and `in1`, `in2`, etc. `Buf`, `Not`, and `NotNot`, and their Schmitt-trigger counterparts, are two-terminal elements that extend leads. .. jupyter-execute:: :hide-code: def drawElements(elmlist, cols=3, dx=8, dy=2): d = schemdraw.Drawing(fontsize=12) for i, e in enumerate(elmlist): y = i//cols*-dy x = (i%cols) * dx d += getattr(logic, e)().right().at((x,y)).label(e, loc='right', ofst=.2, halign='left', valign='center') return d elms = ['And', 'Nand', 'Or', 'Nor', 'Xor', 'Xnor', 'Buf', 'Not', 'NotNot', 'Tgate', 'Tristate', 'Schmitt', 'SchmittNot', 'SchmittAnd', 'SchmittNand'] drawElements(elms, dx=6) Gates with more than 2 inputs can be created using the `inputs` parameter. With more than 3 inputs, the back of the gate will extend up and down. .. jupyter-execute:: logic.Nand(inputs=3) .. jupyter-execute:: logic.Nor(inputs=4) Finally, any input can be pre-inverted (active low) using the `inputnots` keyword with a list of input numbers, starting at 1 to match the anchor names, on which to add an invert bubble. .. jupyter-execute:: logic.Nand(inputs=3, inputnots=[1]) Logic Parser ------------ Logic trees can also be created from a string logic expression such as "(a and b) or c" using :py:func:`schemdraw.parsing.logic_parser.logicparse`. The logic parser requires the `pyparsing <https://pyparsing-docs.readthedocs.io/en/latest/>`_ module. Examples: .. jupyter-execute:: from schemdraw.parsing import logicparse logicparse('not ((w and x) or (y and z))', outlabel='$\overline{Q}$') ..
jupyter-execute:: logicparse('((a xor b) and (b or c) and (d or e)) or ((w and x) or (y and z))') Logicparse understands spelled-out logic functions "and", "or", "nand", "nor", "xor", "xnor", "not", but also common symbols such as "+", "&", "⊕" representing "or", "and", and "xor". .. jupyter-execute:: logicparse('¬ (a ∨ b) & (c ⊻ d)') # Using symbols Use the `gateH` and `gateW` parameters to adjust how gates line up: .. jupyter-execute:: logicparse('(not a) and b or c', gateH=.5) Truth Tables ------------ Simple tables can be drawn using the :py:class:`schemdraw.logic.table.Table` class. This class is included in the logic module as its primary purpose was for drawing logical truth tables. The tables are defined using typical Markdown syntax. The `colfmt` parameter works like the LaTeX tabular environment parameter for defining lines to draw between table columns: "cc|c" draws three centered columns, with a vertical line before the last column. Each column must be specified with a 'c', 'r', or 'l' for center, right, or left justification Two pipes (`||`), or a double pipe character (`ǁ`) draw a double bar between columns. Row lines are added to the table string itself, with either `---` or `===` in the row. .. jupyter-execute:: table = ''' A | B | C ---|---|--- 0 | 0 | 0 0 | 1 | 0 1 | 0 | 0 1 | 1 | 1 ''' logic.Table(table, colfmt='cc||c') Karnaugh Maps ------------- Karnaugh Maps, or K-Maps, are useful for simplifying a logical truth table into the smallest number of gates. Schemdraw can draw K-Maps, with 2, 3, or 4 input variables, using the :py:class:`schemdraw.logic.kmap.Kmap` class. .. jupyter-execute:: logic.Kmap(names='ABCD') The `names` parameter must be a string with 2, 3, or 4 characters, each defining the name of one input variable. The `truthtable` parameter contains a list of tuples defining the logic values to display in the map. 
The first `len(names)` elements are 0's and 1's defining the position of the cell, and the last element is the string to display in that cell. The `default` parameter is a string to show in each cell of the K-Map when that cell is undefined in the `truthtable`. For example, this 2x2 K-Map has a '1' in the 01 position, and 0's elsewhere: .. jupyter-execute:: logic.Kmap(names='AB', truthtable=[('01', '1')]) K-Maps are typically used by grouping sets of 1's together. These groupings can be drawn using the `groups` parameter. The keys of the `groups` dictionary define which cells to group together, and the values of the dictionary define style parameters for the circle around the group. Each key must be a string of length `len(names)`, with either a `0`, `1`, or `.` in each position. As an example, with `names='ABCD'`, a group key of `"1..."` will place a circle around all cells where A=1. Or `".00."` draws a circle around all cells where B and C are both 0. Groups will automatically "wrap" around the edges. Parameters of the style dictionary include `color`, `fill`, `lw`, and `ls`. .. jupyter-execute:: logic.Kmap(names='ABCD', truthtable=[('1100', '1'), ('1101', '1'), ('1111', '1'), ('1110', '1'), ('0101', '1'), ('0111', 'X'), ('1101', '1'), ('1111', '1'), ('0000', '1'), ('1000', '1')], groups={'11..': {'color': 'red', 'fill': '#ff000033'}, '.1.1': {'color': 'blue', 'fill': '#0000ff33'}, '.000': {'color': 'green', 'fill': '#00ff0033'}})
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/elements/logic.rst
0.887296
0.711681
logic.rst
pypi
Flowcharts and Diagrams ======================= .. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import flow Schemdraw provides basic symbols for flowcharting and state diagrams. The :py:mod:`schemdraw.flow.flow` module contains a set of functions for defining flowchart blocks and connecting lines that can be added to schemdraw Drawings. .. code-block:: python from schemdraw import flow Flowchart blocks: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=10, unit=.5) d.add(flow.Start().label('Start').drop('E')) d.add(flow.Arrow()) d.add(flow.Ellipse().label('Ellipse')) d.add(flow.Arrow()) d.add(flow.Box(label='Box')) d.add(flow.Arrow()) d.add(flow.RoundBox(label='RoundBox').drop('S')) d.add(flow.Arrow().down()) d.add(flow.Subroutine(label='Subroutine').drop('W')) d.add(flow.Arrow().left()) d.add(flow.Data(label='Data')) d.add(flow.Arrow()) d.add(flow.Decision(label='Decision')) d.add(flow.Arrow()) d.add(flow.Connect(label='Connect')) d.draw() Some elements have been defined with multiple names, which can be used depending on the context or user preference: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=10, unit=.5) d.add(flow.Terminal().label('Terminal').drop('E')) d.add(flow.Arrow()) d.add(flow.Process().label('Process')) d.add(flow.Arrow()) d.add(flow.RoundProcess().label('RoundProcess')) d.add(flow.Arrow()) d.add(flow.Circle(label='Circle')) d.add(flow.Arrow()) d.add(flow.State(label='State')) d.add(flow.Arrow()) d.add(flow.StateEnd(label='StateEnd')) d.draw() All flowchart symbols have 16 anchor positions named for the compass directions: 'N', 'S', 'E', 'W', 'NE', 'SE, 'NNE', etc., plus a 'center' anchor. The :py:class:`schemdraw.elements.intcircuits.Ic` element can be used with the flowchart elements to create blocks with other inputs/outputs per side if needed. 
The size of each block must be specified manually using `w` and `h` or `r` parameters to size each block to fit any labels. Connecting Lines ---------------- Typical flowcharts will use `Line` or `Arrow` elements to connect the boxes. The line and arrow elements have been included in the `flow` module for convenience. .. jupyter-execute:: with schemdraw.Drawing() as d: d.config(fontsize=10, unit=.5) d += flow.Terminal().label('Start') d += flow.Arrow() d += flow.Process().label('Do something').drop('E') d += flow.Arrow().right() d += flow.Process().label('Do something\nelse') Some flow diagrams, such as State Machine diagrams, often use curved connectors between states. Several Arc connectors are available. Each Arc element takes an `arrow` parameter, which may be '->', '<-', or '<->', to define the end(s) on which to draw arrowheads. Arc2 ^^^^ `Arc2` draws a symmetric quadratic Bezier curve between the endpoints, with curvature controlled by parameter `k`. Endpoints of the arc should be specified using `at()` and `to()` methods. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: d += (a := flow.State().label('A')) d += (b := flow.State(arrow='->').label('B').at((4, 0))) d += flow.Arc2(arrow='->').at(a.NE).to(b.NW).color('deeppink').label('Arc2') d += flow.Arc2(k=.2, arrow='<->').at(b.SW).to(a.SE).color('mediumblue').label('Arc2') .. jupyter-execute:: :hide-code: d.draw() ArcZ and ArcN ^^^^^^^^^^^^^ These draw symmetric cubic Bezier curves between the endpoints. The `ArcZ` curve approaches the endpoints horizontally, and `ArcN` approaches them vertically. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. 
jupyter-execute:: d += (a := flow.State().label('A')) d += (b := flow.State().label('B').at((4, 4))) d += (c := flow.State().label('C').at((8, 0))) d += flow.ArcN(arrow='<->').at(a.N).to(b.S).color('deeppink').label('ArcN') d += flow.ArcZ(arrow='<->').at(b.E).to(c.W).color('mediumblue').label('ArcZ') .. jupyter-execute:: :hide-code: d.draw() Arc3 ^^^^ The `Arc3` curve is an arbitrary cubic Bezier curve, defined by endpoints and angle of approach to each endpoint. `ArcZ` and `ArcN` are simply `Arc3` defined with the angles as 0 and 180, or 90 and 270, respectively. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: d += (a := flow.State().label('A')) d += (b := flow.State().label('B').at((3, 3))) d += flow.Arc3(th1=75, th2=-45, arrow='<->').at(a.N).to(b.SE).color('deeppink').label('Arc3') .. jupyter-execute:: :hide-code: d.draw() ArcLoop ^^^^^^^ The `ArcLoop` curve draws a partial circle that intersects the two endpoints, with the given radius. Often used in state machine diagrams to indicate cases where the state does not change. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: d += (a := flow.State().label('A')) d += flow.ArcLoop(arrow='<-').at(a.NW).to(a.NNE).color('mediumblue').label('ArcLoop', halign='center') .. jupyter-execute:: :hide-code: d.draw() Decisions --------- To label the decision branches, the :py:class:`schemdraw.flow.flow.Decision` element takes keyword arguments for each cardinal direction. For example: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: decision = flow.Decision(W='Yes', E='No', S='Maybe').label('Question?') .. jupyter-execute:: :hide-code: dec = d.add(decision) d.add(flow.Line().at(dec.W).left()) d.add(flow.Line().at(dec.E).right()) d.add(flow.Line().at(dec.S).down()) d.draw() Layout and Flow --------------- Without any directions specified, boxes flow top to bottom (see left image). 
If a direction is specified (right image), the flow will continue in that direction, starting the next arrow at an appropriate anchor. Otherwise, the `drop` method is useful for specifying where to begin the next arrow. .. jupyter-execute:: with schemdraw.Drawing() as d: d.config(fontsize=10, unit=.5) d += flow.Terminal().label('Start') d += flow.Arrow() d += flow.Process().label('Step 1') d += flow.Arrow() d += flow.Process().label('Step 2').drop('E') d += flow.Arrow().right() d += flow.Connect().label('Next') d += flow.Terminal().label('Start').at((4, 0)) d += flow.Arrow().theta(-45) d += flow.Process().label('Step 1') d += flow.Arrow() d += flow.Process().label('Step 2').drop('E') d += flow.Arrow().right() d += flow.Connect().label('Next') See the :ref:`galleryflow` Gallery for more examples.
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/elements/flow.rst
0.886482
0.667792
flow.rst
pypi
Digital Logic ============= .. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import logic Logic gates can be drawn by importing the :py:mod:`schemdraw.logic.logic` module: .. code-block:: python from schemdraw import logic Logic gates are shown below. Gates define anchors for `out` and `in1`, `in2`, etc. `Buf`, `Not`, and `NotNot`, and their Schmitt-trigger counterparts, are two-terminal elements that extend leads. .. jupyter-execute:: :hide-code: def drawElements(elmlist, cols=3, dx=8, dy=2): d = schemdraw.Drawing(fontsize=12) for i, e in enumerate(elmlist): y = i//cols*-dy x = (i%cols) * dx d += getattr(logic, e)().right().at((x,y)).label(e, loc='right', ofst=.2, halign='left', valign='center') return d elms = ['And', 'Nand', 'Or', 'Nor', 'Xor', 'Xnor', 'Buf', 'Not', 'NotNot', 'Tgate', 'Tristate', 'Schmitt', 'SchmittNot', 'SchmittAnd', 'SchmittNand'] drawElements(elms, dx=6) Gates with more than 2 inputs can be created using the `inputs` parameter. With more than 3 inputs, the back of the gate will extend up and down. .. jupyter-execute:: logic.Nand(inputs=3) .. jupyter-execute:: logic.Nor(inputs=4) Finally, any input can be pre-inverted (active low) using the `inputnots` keyword with a list of input numbers, starting at 1 to match the anchor names, on which to add an invert bubble. .. jupyter-execute:: logic.Nand(inputs=3, inputnots=[1]) Logic Parser ------------ Logic trees can also be created from a string logic expression such as "(a and b) or c" using using :py:func:`schemdraw.parsing.logic_parser.logicparse`. The logic parser requires the `pyparsing <https://pyparsing-docs.readthedocs.io/en/latest/>`_ module. Examples: .. jupyter-execute:: from schemdraw.parsing import logicparse logicparse('not ((w and x) or (y and z))', outlabel='$\overline{Q}$') .. 
jupyter-execute:: logicparse('((a xor b) and (b or c) and (d or e)) or ((w and x) or (y and z))') Logicparse understands spelled-out logic functions "and", "or", "nand", "nor", "xor", "xnor", "not", but also common symbols such as "+", "&", "⊕" representing "or", "and", and "xor". .. jupyter-execute:: logicparse('¬ (a ∨ b) & (c ⊻ d)') # Using symbols Use the `gateH` and `gateW` parameters to adjust how gates line up: .. jupyter-execute:: logicparse('(not a) and b or c', gateH=.5) Truth Tables ------------ Simple tables can be drawn using the :py:class:`schemdraw.logic.table.Table` class. This class is included in the logic module as its primary purpose was for drawing logical truth tables. The tables are defined using typical Markdown syntax. The `colfmt` parameter works like the LaTeX tabular environment parameter for defining lines to draw between table columns: "cc|c" draws three centered columns, with a vertical line before the last column. Each column must be specified with a 'c', 'r', or 'l' for center, right, or left justification Two pipes (`||`), or a double pipe character (`ǁ`) draw a double bar between columns. Row lines are added to the table string itself, with either `---` or `===` in the row. .. jupyter-execute:: table = ''' A | B | C ---|---|--- 0 | 0 | 0 0 | 1 | 0 1 | 0 | 0 1 | 1 | 1 ''' logic.Table(table, colfmt='cc||c') Karnaugh Maps ------------- Karnaugh Maps, or K-Maps, are useful for simplifying a logical truth table into the smallest number of gates. Schemdraw can draw K-Maps, with 2, 3, or 4 input variables, using the :py:class:`schemdraw.logic.kmap.Kmap` class. .. jupyter-execute:: logic.Kmap(names='ABCD') The `names` parameter must be a string with 2, 3, or 4 characters, each defining the name of one input variable. The `truthtable` parameter contains a list of tuples defining the logic values to display in the map. 
The first `len(names)` elements are 0's and 1's defining the position of the cell, and the last element is the string to display in that cell. The `default` parameter is a string to show in each cell of the K-Map when that cell is undefined in the `truthtable`. For example, this 2x2 K-Map has a '1' in the 01 position, and 0's elsewhere: .. jupyter-execute:: logic.Kmap(names='AB', truthtable=[('01', '1')]) K-Maps are typically used by grouping sets of 1's together. These groupings can be drawn using the `groups` parameter. The keys of the `groups` dictionary define which cells to group together, and the values of the dictionary define style parameters for the circle around the group. Each key must be a string of length `len(names)`, with either a `0`, `1`, or `.` in each position. As an example, with `names='ABCD'`, a group key of `"1..."` will place a circle around all cells where A=1. Or `".00."` draws a circle around all cells where B and C are both 0. Groups will automatically "wrap" around the edges. Parameters of the style dictionary include `color`, `fill`, `lw`, and `ls`. .. jupyter-execute:: logic.Kmap(names='ABCD', truthtable=[('1100', '1'), ('1101', '1'), ('1111', '1'), ('1110', '1'), ('0101', '1'), ('0111', 'X'), ('1101', '1'), ('1111', '1'), ('0000', '1'), ('1000', '1')], groups={'11..': {'color': 'red', 'fill': '#ff000033'}, '.1.1': {'color': 'blue', 'fill': '#0000ff33'}, '.000': {'color': 'green', 'fill': '#00ff0033'}})
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/elements/.ipynb_checkpoints/logic-checkpoint.rst
0.887296
0.711681
logic-checkpoint.rst
pypi
Flowcharts and Diagrams ======================= .. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import flow Schemdraw provides basic symbols for flowcharting and state diagrams. The :py:mod:`schemdraw.flow.flow` module contains a set of functions for defining flowchart blocks and connecting lines that can be added to schemdraw Drawings. .. code-block:: python from schemdraw import flow Flowchart blocks: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=10, unit=.5) d.add(flow.Start().label('Start').drop('E')) d.add(flow.Arrow()) d.add(flow.Ellipse().label('Ellipse')) d.add(flow.Arrow()) d.add(flow.Box(label='Box')) d.add(flow.Arrow()) d.add(flow.RoundBox(label='RoundBox').drop('S')) d.add(flow.Arrow().down()) d.add(flow.Subroutine(label='Subroutine').drop('W')) d.add(flow.Arrow().left()) d.add(flow.Data(label='Data')) d.add(flow.Arrow()) d.add(flow.Decision(label='Decision')) d.add(flow.Arrow()) d.add(flow.Connect(label='Connect')) d.draw() Some elements have been defined with multiple names, which can be used depending on the context or user preference: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=10, unit=.5) d.add(flow.Terminal().label('Terminal').drop('E')) d.add(flow.Arrow()) d.add(flow.Process().label('Process')) d.add(flow.Arrow()) d.add(flow.RoundProcess().label('RoundProcess')) d.add(flow.Arrow()) d.add(flow.Circle(label='Circle')) d.add(flow.Arrow()) d.add(flow.State(label='State')) d.add(flow.Arrow()) d.add(flow.StateEnd(label='StateEnd')) d.draw() All flowchart symbols have 16 anchor positions named for the compass directions: 'N', 'S', 'E', 'W', 'NE', 'SE, 'NNE', etc., plus a 'center' anchor. The :py:class:`schemdraw.elements.intcircuits.Ic` element can be used with the flowchart elements to create blocks with other inputs/outputs per side if needed. 
The size of each block must be specified manually using `w` and `h` or `r` parameters to size each block to fit any labels. Connecting Lines ---------------- Typical flowcharts will use `Line` or `Arrow` elements to connect the boxes. The line and arrow elements have been included in the `flow` module for convenience. .. jupyter-execute:: with schemdraw.Drawing() as d: d.config(fontsize=10, unit=.5) d += flow.Terminal().label('Start') d += flow.Arrow() d += flow.Process().label('Do something').drop('E') d += flow.Arrow().right() d += flow.Process().label('Do something\nelse') Some flow diagrams, such as State Machine diagrams, often use curved connectors between states. Several Arc connectors are available. Each Arc element takes an `arrow` parameter, which may be '->', '<-', or '<->', to define the end(s) on which to draw arrowheads. Arc2 ^^^^ `Arc2` draws a symmetric quadratic Bezier curve between the endpoints, with curvature controlled by parameter `k`. Endpoints of the arc should be specified using `at()` and `to()` methods. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: d += (a := flow.State().label('A')) d += (b := flow.State(arrow='->').label('B').at((4, 0))) d += flow.Arc2(arrow='->').at(a.NE).to(b.NW).color('deeppink').label('Arc2') d += flow.Arc2(k=.2, arrow='<->').at(b.SW).to(a.SE).color('mediumblue').label('Arc2') .. jupyter-execute:: :hide-code: d.draw() ArcZ and ArcN ^^^^^^^^^^^^^ These draw symmetric cubic Bezier curves between the endpoints. The `ArcZ` curve approaches the endpoints horizontally, and `ArcN` approaches them vertically. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. 
jupyter-execute:: d += (a := flow.State().label('A')) d += (b := flow.State().label('B').at((4, 4))) d += (c := flow.State().label('C').at((8, 0))) d += flow.ArcN(arrow='<->').at(a.N).to(b.S).color('deeppink').label('ArcN') d += flow.ArcZ(arrow='<->').at(b.E).to(c.W).color('mediumblue').label('ArcZ') .. jupyter-execute:: :hide-code: d.draw() Arc3 ^^^^ The `Arc3` curve is an arbitrary cubic Bezier curve, defined by endpoints and angle of approach to each endpoint. `ArcZ` and `ArcN` are simply `Arc3` defined with the angles as 0 and 180, or 90 and 270, respectively. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: d += (a := flow.State().label('A')) d += (b := flow.State().label('B').at((3, 3))) d += flow.Arc3(th1=75, th2=-45, arrow='<->').at(a.N).to(b.SE).color('deeppink').label('Arc3') .. jupyter-execute:: :hide-code: d.draw() ArcLoop ^^^^^^^ The `ArcLoop` curve draws a partial circle that intersects the two endpoints, with the given radius. Often used in state machine diagrams to indicate cases where the state does not change. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: d += (a := flow.State().label('A')) d += flow.ArcLoop(arrow='<-').at(a.NW).to(a.NNE).color('mediumblue').label('ArcLoop', halign='center') .. jupyter-execute:: :hide-code: d.draw() Decisions --------- To label the decision branches, the :py:class:`schemdraw.flow.flow.Decision` element takes keyword arguments for each cardinal direction. For example: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing(fontsize=12, unit=1) .. jupyter-execute:: decision = flow.Decision(W='Yes', E='No', S='Maybe').label('Question?') .. jupyter-execute:: :hide-code: dec = d.add(decision) d.add(flow.Line().at(dec.W).left()) d.add(flow.Line().at(dec.E).right()) d.add(flow.Line().at(dec.S).down()) d.draw() Layout and Flow --------------- Without any directions specified, boxes flow top to bottom (see left image). 
If a direction is specified (right image), the flow will continue in that direction, starting the next arrow at an appropriate anchor. Otherwise, the `drop` method is useful for specifying where to begin the next arrow. .. jupyter-execute:: with schemdraw.Drawing() as d: d.config(fontsize=10, unit=.5) d += flow.Terminal().label('Start') d += flow.Arrow() d += flow.Process().label('Step 1') d += flow.Arrow() d += flow.Process().label('Step 2').drop('E') d += flow.Arrow().right() d += flow.Connect().label('Next') d += flow.Terminal().label('Start').at((4, 0)) d += flow.Arrow().theta(-45) d += flow.Process().label('Step 1') d += flow.Arrow() d += flow.Process().label('Step 2').drop('E') d += flow.Arrow().right() d += flow.Connect().label('Next') See the :ref:`galleryflow` Gallery for more examples.
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/elements/.ipynb_checkpoints/flow-checkpoint.rst
0.886482
0.667792
flow-checkpoint.rst
pypi
Customizing Elements ==================== .. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import elements as elm from schemdraw import logic from schemdraw.segments import * Grouping Elements ----------------- If a set of circuit elements are to be reused multiple times, they can be grouped into a single element. Create and populate a drawing, but set `show=False`. Instead, use the Drawing to create a new :py:class:`schemdraw.elements.ElementDrawing`, which converts the drawing into an element instance to add to other drawings. .. jupyter-execute:: :emphasize-lines: 8-10 with schemdraw.Drawing(show=False) as d1: d1 += elm.Resistor() d1.push() d1 += elm.Capacitor().down() d1 += elm.Line().left() d1.pop() with schemdraw.Drawing() as d2: # Add a second drawing for i in range(3): d2 += elm.ElementDrawing(d1) # Add the first drawing to it 3 times .. _customelements: Defining custom elements ------------------------ All elements are subclasses of :py:class:`schemdraw.elements.Element` or :py:class:`schemdraw.elements.Element2Term`. For elements consisting of several other already-defined elements (like a relay), :py:class:`schemdraw.elements.compound.ElementCompound` can be used for easy combining of multiple elements. Subclasses only need to define the `__init__` method in order to add lines, shapes, and text to the new element, all of which are defined using :py:class:`schemdraw.segments.Segment` classes. New Segments should be appended to the `Element.segments` attribute list. Coordinates are all defined in element cooridnates, where the element begins at (0, 0) and is drawn from left to right. The drawing engine will rotate and translate the element to its final position, and for two-terminal elements deriving from Element2Term, will add lead extensions to the correct length depending on the element's placement parameters. 
Therefore elements deriving from Element2Term should not define the lead extensions (e.g. a Resistor only defines the zig-zag portion). A standard resistor is 1 drawing unit long, and with default lead extension will become 3 units long. Segments include :py:class:`schemdraw.segments.Segment`, :py:class:`schemdraw.segments.SegmentPoly`, :py:class:`schemdraw.segments.SegmentCircle`, :py:class:`schemdraw.segments.SegmentArc`, :py:class:`schemdraw.segments.SegmentText`, and :py:class:`schemdraw.segments.SegmentBezier`. The subclassed `Element.__init__` method can be defined with extra parameters to help define the element options. In addition to the list of Segments, any named anchors and other parameters should be specified. Anchors should be added to the `Element.anchors` dictionary as {name: (x, y)} key/value pairs. The Element instance maintains its own parameters dictionary in `Element.params` that override the default drawing parameters. Parameters are resolved by a ChainMap of user arguments to the `Element` instance, the `Element.params` attribute, then the `schemdraw.Drawing` parameters, in that order. A common use of setting `Element.params` in the setup function is to change the default position of text labels, for example Transistor elements apply labels on the right side of the element by default, so they add to the setup: .. code-block:: self.params['lblloc'] = 'rgt' The user can still override this label position by creating, for example, `Transistor().label('Q1', loc='top')`. As an example, here's the definition of our favorite element, the resistor: .. code-block:: python class Resistor(Element2Term): def __init__(self, *d, **kwargs): super().__init__(*d, **kwargs) self.segments.append(Segment([(0, 0), (0.5*reswidth, resheight), (1.5*reswidth, -resheight), (2.5*reswidth, resheight), (3.5*reswidth, -resheight), (4.5*reswidth, resheight), (5.5*reswidth, -resheight), (6*reswidth, 0)])) The resistor is made of one path. 
`reswidth` and `resheight` are constants that define the height and width of the resistor zigzag (and are referenced by several other elements too). Browse the source code in the `Schemdraw.elements` submodule to see the definitions of the other built-in elements. Flux Capacitor Example ^^^^^^^^^^^^^^^^^^^^^^ For an example, let's make a flux capacitor circuit element. Since everyone knows a flux-capacitor has three branches, we should subclass the standard :py:class:`schemdraw.elements.Element` class instead of :py:class:`schemdraw.elements.Element2Term`. Start by importing the Segments and define the class name and `__init__` function: .. code-block:: python from schemdraw.segments import * class FluxCapacitor(Element): def __init__(self, *d, **kwargs): super().__init__(*d, **kwargs) The `d` and `kwargs` are passed to `super` to initialize the Element. We want a dot in the center of our flux capacitor, so start by adding a `SegmentCircle`. The `fclen` and `radius` variables could be set as arguments to the __init__ for the user to adjust, if desired, but here they are defined as constants in the __init__. .. code-block:: python fclen = 0.5 radius = 0.075 self.segments.append(SegmentCircle((0, 0), radius)) Next, add the paths as Segment instances, which are drawn as lines. The flux capacitor will have three paths, all extending from the center dot: .. code-block:: python self.segments.append(Segment([(0, 0), (0, -fclen*1.41)])) self.segments.append(Segment([(0, 0), (fclen, fclen)])) self.segments.append(Segment([(0, 0), (-fclen, fclen)])) And at the end of each path is an open circle. Append three more `SegmentCircle` instances. By specifying `fill=None` the SegmentCircle will always remain unfilled regardless of any `fill` arguments provided to `Drawing` or `FluxCapacitor`. .. 
code-block:: python self.segments.append(SegmentCircle((0, -fclen*1.41), 0.2, fill=None)) self.segments.append(SegmentCircle((fclen, fclen), 0.2, fill=None)) self.segments.append(SegmentCircle((-fclen, fclen), 0.2, fill=None)) Finally, we need to define anchor points so that other elements can be connected to the right places. Here, they're called `p1`, `p2`, and `p3` for lack of better names (what do you call the inputs to a flux capacitor?) Add these to the `self.anchors` dictionary. .. code-block:: python self.anchors['p1'] = (-fclen, fclen) self.anchors['p2'] = (fclen, fclen) self.anchors['p3'] = (0, -fclen*1.41) Here's the Flux Capacitor class all in one: .. jupyter-execute:: class FluxCapacitor(elm.Element): def __init__(self, *d, **kwargs): super().__init__(*d, **kwargs) radius = 0.075 fclen = 0.5 self.segments.append(SegmentCircle((0, 0), radius)) self.segments.append(Segment([(0, 0), (0, -fclen*1.41)])) self.segments.append(Segment([(0, 0), (fclen, fclen)])) self.segments.append(Segment([(0, 0), (-fclen, fclen)])) self.segments.append(SegmentCircle((0, -fclen*1.41), 0.2, fill=None)) self.segments.append(SegmentCircle((fclen, fclen), 0.2, fill=None)) self.segments.append(SegmentCircle((-fclen, fclen), 0.2, fill=None)) self.anchors['p1'] = (-fclen, fclen) self.anchors['p2'] = (fclen, fclen) self.anchors['p3'] = (0, -fclen*1.41) Try it out: .. jupyter-execute:: FluxCapacitor() Segment objects --------------- After an element is added to a drawing, the :py:class:`schemdraw.segments.Segment` objects defining it are accessible in the `segments` attribute list of the Element. For even more control over customizing individual pieces of an element, the parameters of a Segment can be changed. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: d += (n := logic.Nand()) n.segments[1].color = 'red' n.segments[1].zorder = 5 # Put the bubble on top .. 
jupyter-execute:: :hide-code: d.draw() Matplotlib axis --------------- When using the Matplotlib backend (the default), a final customization option is to use the Matplotlib figure and add to it. A :py:class:`schemdraw.Figure` is returned from the `draw` method, which contains `fig` and `ax` attributes holding the Matplotlib figure. .. jupyter-execute:: :emphasize-lines: 4-5 schemdraw.use('matplotlib') d = schemdraw.Drawing() d.add(elm.Resistor()) schemfig = d.draw() schemfig.ax.axvline(.5, color='purple', ls='--') schemfig.ax.axvline(2.5, color='orange', ls='-', lw=3); display(schemfig)
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/usage/customizing.rst
0.897803
0.693447
customizing.rst
pypi
.. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import elements as elm .. _placement: Placing Elements ================ Elements are added to a Drawing using the `add` method or `+=` shortcut. The Drawing maintains a current position and direction, such that the default placement of the next element will start at the end of the previous element, going in the same direction. .. jupyter-execute:: with schemdraw.Drawing() as d: d += elm.Capacitor() d += elm.Resistor() d += elm.Diode() If a direction method (`up`, `down`, `left`, `right`) is added to an element, the element is rotated in that direction, and future elements take the same direction: .. jupyter-execute:: with schemdraw.Drawing() as d: d += elm.Capacitor() d += elm.Resistor().up() d += elm.Diode() The `theta` method can be used to specify any rotation angle in degrees. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Resistor().theta(20).label('R1') d += elm.Resistor().label('R2') # Takes position and direction from R1 .. jupyter-execute:: :hide-code: d.draw() Anchors ------- All elements have a set of predefined "anchor" positions within the element. For example, a bipolar transistor has `base`, `emitter`, and `collector` anchors. All two-terminal elements have anchors named `start`, `center`, and `end`. The docstring for each element lists the available anchors. Once an element is added to the drawing, all its anchor positions will be added as attributes to the element object, so the base position of transistor assigned to variable `Q` may be accessed via `Q.base`. Rather than working in absolute (x, y) coordinates, anchors can be used to set the position of new elements. Using the `at` method, one element can be placed starting on the anchor of another element. For example, to draw an opamp and place a resistor on the output, store the Opamp instance to a variable. 
Then call the `at` method of the new element passing the `Opamp.out` anchor. After the resistor is drawn, the current drawing position is moved to the endpoint of the resistor. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: opamp = d.add(elm.Opamp()) d.add(elm.Resistor().right().at(opamp.out)) .. jupyter-execute:: :hide-code: d.draw() Python's walrus operator provides a convenient shorthand notation for adding an element using `+=` and storing it at the same time. The above code can be written equivalently as: .. code-block:: python d += (opamp := elm.Opamp()) d += elm.Resistor().right().at(opamp.out) The second purpose for anchors is aligning new elements with respect to existing elements. Suppose a resistor has just been placed, and now an Opamp should be connected to the resistor. The `anchor` method tells the Drawing which input on the Opamp should align with resistor. Here, an Opamp is placed at the end of a resistor, connected to the opamp's `in1` anchor (the inverting input). .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Resistor().label('R1') d += elm.Opamp().anchor('in1') .. jupyter-execute:: :hide-code: d.draw() Compared to anchoring the opamp at `in2` (the noninverting input): .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Resistor().label('R2') d += elm.Opamp().anchor('in2') .. jupyter-execute:: :hide-code: d.draw() Dimensions ---------- The inner zig-zag portion of a resistor has length of 1 unit, while the default lead extensions are 1 unit on each side, making the default total resistor length 3 units. Placement methods such as `at` and `to` accept a tuple of (x, y) position in these units. .. 
jupyter-execute:: :hide-code: with schemdraw.Drawing() as d: d += elm.Resistor() d += elm.Line(arrow='|-|').at((1, .7)).to((2, .7)).label('1.0').color('royalblue') d += elm.Line(arrow='|-|').at((0, -.7)).to((3, -.7)).label('Drawing.unit', 'bottom').color('royalblue') This default 2-terminal length can be changed using the `unit` parameter to the :py:meth:`schemdraw.Drawing.config` method: .. code-block:: python with schemdraw.Drawing() as d: d.config(unit=2) ... .. jupyter-execute:: :hide-code: with schemdraw.Drawing() as d: d.config(unit=2) d += elm.Resistor() d += elm.Line(arrow='|-|').at((.5, .7)).to((1.5, .7)).label('1.0').color('royalblue') d += elm.Line(arrow='|-|').at((0, -.7)).to((2, -.7)).label('Drawing.unit', 'bottom').color('royalblue') Two-Terminal Elements --------------------- In Schemdraw, a "Two-Terminal Element" is any element that can grow to fill a given length (this includes elements such as the Potentiometer, even though it electrically has three terminals). All two-terminal elements subclass :py:class:`schemdraw.elements.Element2Term`. They have some additional methods for setting placement and length. The `length` method sets an exact length for a two-terminal element. Alternatively, the `up`, `down`, `left`, and `right` methods on two-terminal elements take a length parameter. .. jupyter-execute:: :emphasize-lines: 5 with schemdraw.Drawing() as d: d += elm.Dot() d += elm.Resistor() d += elm.Dot() d += elm.Diode().length(6) d += elm.Dot() The `to` method will set an exact endpoint for a 2-terminal element. The starting point is still the ending location of the previous element. Notice the Diode is stretched longer than the standard element length in order to fill the diagonal distance. .. 
jupyter-execute:: :emphasize-lines: 4 with schemdraw.Drawing() as d: R = d.add(elm.Resistor()) C = d.add(elm.Capacitor().up()) Q = d.add(elm.Diode().to(R.start)) The `tox` and `toy` methods are useful for placing 2-terminal elements to "close the loop", without requiring an exact length. They extend the element horizontally or vertically to the x- or y- coordinate of the anchor given as the argument. These methods automatically change the drawing direction. Here, the Line element does not need to specify an exact length to fill the space and connect back with the Source. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: :emphasize-lines: 9 d += (C := elm.Capacitor()) d += elm.Diode() d += elm.Line().down() # Now we want to close the loop, but can use `tox` # to avoid having to know exactly how far to go. # The Line will extend horizontally to the same x-position # as the Capacitor's `start` anchor. d += elm.Line().tox(C.start) # Now close the loop by relying on the fact that all # two-terminal elements (including Source and Line) # are the same length by default d += elm.Source().up() .. jupyter-execute:: :hide-code: d.draw() Finally, exact endpoints can also be specified using the `endpoints` method. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: :emphasize-lines: 5 d += (R := elm.Resistor()) d += (Q := elm.Diode().down(6)) d += elm.Line().tox(R.start) d += elm.Capacitor().toy(R.start) d += elm.SourceV().endpoints(Q.end, R.start) .. jupyter-execute:: :hide-code: d.draw() Orientation ----------- The `flip` and `reverse` methods are useful for changing orientation of directional elements such as Diodes, but they do not affect the drawing direction. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Zener().label('Normal') d += elm.Zener().flip().label('Flip') d += elm.Zener().reverse().label('Reverse') .. 
jupyter-execute:: :hide-code: d.draw() Drawing State ------------- The :py:class:`schemdraw.Drawing` maintains a drawing state that includes the current x, y position, stored in the `Drawing.here` attribute as a (x, y) tuple, and drawing direction stored in the `Drawing.theta` attribute. A LIFO stack of drawing states can be used, via the :py:meth:`schemdraw.Drawing.push` and :py:meth:`schemdraw.Drawing.pop` method, for situations when it's useful to save the drawing state and come back to it later. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :emphasize-lines: 4,10 d += elm.Inductor() d += elm.Dot() print('d.here:', d.here) d.push() # Save this drawing position/direction for later d += elm.Capacitor().down() # Go off in another direction temporarily d += elm.Ground(lead=False) print('d.here:', d.here) d.pop() # Return to the pushed position/direction print('d.here:', d.here) d += elm.Diode() d.draw() Changing the drawing position can be accomplished by calling :py:meth:`schemdraw.Drawing.move` or :py:meth:`schemdraw.Drawing.move_from`. Drop and Hold Methods ********************* To place an element without moving the drawing position, use the :py:meth:`schemdraw.elements.Element.hold` method. The element will be placed without changing the drawing state. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :emphasize-lines: 5 d += elm.Diode() # Normal placement: drawing position moves to end of element d += elm.Dot().color('red') d.here = (0, -1) d += elm.Diode().hold() # Hold method prevents position from changing d += elm.Dot().color('blue') .. jupyter-execute:: :hide-code: d.draw() Three-terminal elements do not necessarily leave the drawing position where desired, so after drawing an element, the current drawing position can be set using the :py:meth:`schemdraw.elements.Element.drop` method to specify an anchor at which to place the cursor. 
This reduces the need to assign every element to a variable name. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :emphasize-lines: 5 d += elm.BjtNpn() d += elm.Resistor().label('R1') d.here = (5, 0) d += elm.BjtNpn().drop('emitter') d += elm.Resistor().label('R2') .. jupyter-execute:: :hide-code: d.draw() Connecting Elements ------------------- Typically, the :py:class:`schemdraw.elements.lines.Line` element is used to connect elements together. More complex line routing requires multiple Line elements. The :py:class:`schemdraw.elements.lines.Wire` element is used as a shortcut for placing multiple connecting lines at once. The Wire element connects the start and end points based on its `shape` parameter. The `k` parameter is used to set the distance before the wire first changes direction. .. list-table:: Wire Shape Parameters :widths: 25 50 :header-rows: 1 * - Shape Parameter - Description * - `-` - Direct Line * - `-\|` - Horizontal then vertical * - `\|-` - Vertical then horizontal * - `n` - Vertical-horizontal-vertical (like an n or u) * - `c` - Horizontal-vertical-horizontal (like a c or ↄ) * - `z` - Horizontal-diagonal-horizontal * - `N` - Vertical-diagonal-vertical .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() d += (A := elm.Dot().label('A', halign='right', ofst=(-.1, 0))) d += (B := elm.Dot().label('B').at((4, 4))) d += (C := elm.Dot().label('C', ofst=(-.2, 0)).at((7, 4))) d += (D := elm.Dot().label('D', ofst=(-.2, 0)).at((9, 0))) d += (E := elm.Dot().label('E', ofst=(-.2, 0)).at((11, 4))) d += (F := elm.Dot().label('F', ofst=(-.2, 0)).at((13, 0))) .. 
jupyter-execute:: d += elm.Wire('-', arrow='->').at(A.center).to(B.center).color('deeppink').label('"-"') d += elm.Wire('|-', arrow='->').at(A.center).to(B.center).color('mediumblue').label('"|-"') d += elm.Wire('-|', arrow='->').at(A.center).to(B.center).color('darkseagreen').label('"-|"') d += elm.Wire('c', k=-1, arrow='->').at(C.center).to(D.center).color('darkorange').label('"c"', halign='left') d += elm.Wire('n', arrow='->').at(C.center).to(D.center).color('orchid').label('"n"') d += elm.Wire('N', arrow='->').at(E.center).to(F.center).color('darkred').label('"N"', 'start', ofst=(-.1, -.75)) d += elm.Wire('z', k=.5, arrow='->').at(E.center).to(F.center).color('teal').label('"z"', halign='left', ofst=(0, .5)) .. jupyter-execute:: :hide-code: d.draw() Both `Line` and `Wire` elements take an `arrow` parameter, a string specification of arrowhead types at the start and end of the wire. The arrow string may contain "<", ">", for arrowheads, "\|" for an endcap, and "o" for a dot. Some examples are shown below: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: d += elm.Line(arrow='->').label('"->"', 'right') d += elm.Line(arrow='<-').at((0, -.75)).label('"<-"', 'right') d += elm.Line(arrow='<->').at((0, -1.5)).label('"<->"', 'right') d += elm.Line(arrow='|->').at((0, -2.25)).label('"|->"', 'right') d += elm.Line(arrow='|-o').at((0, -3.0)).label('"|-o"', 'right') .. jupyter-execute:: :hide-code: d.draw() Because dots are used to show connected wires, all two-terminal elements have `dot` and `idot` methods for quickly adding a dot at the end or beginning of the element, respectively. .. jupyter-execute:: elm.Resistor().dot() Keyword Arguments ----------------- All :py:class:`schemdraw.elements.Element` types take keyword arguments that can also be used to set element properties, partly for historical reasons but also for easy element setup via dictionary unpacking. The keyword arguments are equivalent to calling the Element setup methods. 
The keyword arguments are not validated or type checked, so the chained method interface described above is recommended for configuring elements. +--------------------+-------------------------------+ | Keyword Argument | Method Equivalent | +====================+===============================+ | `d='up'` | `.up()` | +--------------------+-------------------------------+ | `d='down'` | `.down()` | +--------------------+-------------------------------+ | `d='left'` | `.left()` | +--------------------+-------------------------------+ | `d='right'` | `.right()` | +--------------------+-------------------------------+ | `theta=X` | `.theta(X)` | +--------------------+-------------------------------+ | `at=X` or `xy=X` | `.at(X)` | +--------------------+-------------------------------+ | `flip=True` | `.flip()` | +--------------------+-------------------------------+ | `reverse=True` | `.reverse()` | +--------------------+-------------------------------+ | `anchor=X` | `.anchor(X)` | +--------------------+-------------------------------+ | `zoom=X` | `.scale(X)` | +--------------------+-------------------------------+ | `color=X` | `.color(X)` | +--------------------+-------------------------------+ | `fill=X` | `.fill(X)` | +--------------------+-------------------------------+ | `ls=X` | `.linestyle(X)` | +--------------------+-------------------------------+ | `lw=X` | `.linewidth(X)` | +--------------------+-------------------------------+ | `zorder=X` | `.zorder(X)` | +--------------------+-------------------------------+ | `move_cur=False` | `.hold()` | +--------------------+-------------------------------+ | `label=X` | `.label(X)` | +--------------------+-------------------------------+ | `botlabel=X` | `.label(X, loc='bottom')` | +--------------------+-------------------------------+ | `lftlabel=X` | `.label(X, loc='left')` | +--------------------+-------------------------------+ | `rgtlabel=X` | `.label(X, loc='right')` | 
+--------------------+-------------------------------+ | `toplabel=X` | `.label(X, loc='top')` | +--------------------+-------------------------------+ | `lblloc=X` | `.label(..., loc=X)` | +--------------------+-------------------------------+
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/usage/placement.rst
0.887552
0.699857
placement.rst
pypi
Getting Started =============== Installation ------------ schemdraw can be installed from pip using .. code-block:: bash pip install schemdraw or to include optional ``matplotlib`` backend dependencies: .. code-block:: bash pip install schemdraw[matplotlib] To allow the SVG drawing :ref:`backends` to render math expressions, install the optional `ziamath <https://ziamath.readthedocs.io>`_ dependency with: .. code-block:: bash pip install schemdraw[svgmath] Alternatively, schemdraw can be installed directly by downloading the source and running .. code-block:: bash pip install ./ Schemdraw requires Python 3.8 or higher. Overview --------- The :py:mod:`schemdraw` module allows for drawing circuit elements. :py:mod:`schemdraw.elements` contains :ref:`electrical` pre-defined for use in a drawing. A common import structure is: .. jupyter-execute:: import schemdraw import schemdraw.elements as elm To make a circuit diagram, a :py:class:`schemdraw.Drawing` is created and :py:class:`schemdraw.elements.Element` instances are added to it: .. jupyter-execute:: with schemdraw.Drawing() as d: d.add(elm.Resistor()) d.add(elm.Capacitor()) d.add(elm.Diode()) The `+=` operator may be used as shorthand notation to add elements to the drawing. This code is equivalent to the above: .. code-block:: python with schemdraw.Drawing() as d: d += elm.Resistor() d += elm.Capacitor() d += elm.Diode() Element placement and other properties are set using a chained method interface, for example: .. jupyter-execute:: with schemdraw.Drawing() as d: d += elm.Resistor().label('100KΩ') d += elm.Capacitor().down().label('0.1μF', loc='bottom') d += elm.Line().left() d += elm.Ground() d += elm.SourceV().up().label('10V') Methods `up`, `down`, `left`, `right` specify the drawing direction, and `label` adds text to the element. If not specified, elements reuse the same direction from the previous element, and begin where the previous element ended. 
Using the `with` context manager is a convenience, letting the drawing be displayed and saved upon exiting the `with` block. Schematics may also be created simply by assigning a new Drawing instance, but this requires calling `draw()` and/or `save()` explicitly: .. code-block:: python d = schemdraw.Drawing() d += elm.Resistor() ... d.draw() d.save('my_circuit.svg') For full details of placing and stylizing elements, see :ref:`placement` and :py:class:`schemdraw.elements.Element`. In general, parameters that control **what** is drawn are passed to the element itself, and parameters that control **how** things are drawn are set using chained Element methods. For example, to make a polarized Capacitor, pass `polar=True` as an argument to `Capacitor`, but to change the Capacitor's color, use the `.color()` method: `elm.Capacitor(polar=True).color('red')`. Viewing the Drawing ------------------- Jupyter ******* When run in a Jupyter notebook, the schematic will be drawn to the cell output after the `with` block is exited. If your schematics pop up in an external window, and you are using the Matplotlib backend, set Matplotlib to inline mode before importing schemdraw: .. code-block:: python %matplotlib inline For best results when viewing circuits in the notebook, use a vector figure format, such as svg before importing schemdraw: .. code-block:: python %config InlineBackend.figure_format = 'svg' Python Scripts and GUI/Web apps ******************************* If run as a Python script, the schematic will be opened in a pop-up window after the `with` block exits. Add the `show=False` option when creating the Drawing to suppress the window from appearing. .. code-block:: python with schemdraw.Drawing(show=False) as d: ... The raw image data as a bytes array can be obtained by calling `.get_imagedata()` after the `with` block exits. This can be useful for integrating schemdraw into an existing GUI or web application. .. 
code-block:: python with schemdraw.Drawing() as drawing: ... image_bytes = drawing.get_imagedata('svg') Headless Servers **************** When running on a server, sometimes there is no display available. The code may attempt to open the GUI preview window and fail. In these cases, try setting the Matplotlib backend to a non-GUI option. Before importing schemdraw, add these lines to use the Agg backend which does not have a GUI. Then get the drawing using `d.get_imagedata()`, or `d.save()` to get the image. .. code-block:: python import matplotlib matplotlib.use('Agg') # Set Matplotlib's backend here Alternatively, use Schemdraw's SVG backend (see :ref:`backends`). Saving Drawings --------------- To save the schematic to a file, add the `file` parameter when setting up the Drawing. The image type is determined from the file extension. Options include `svg`, `eps`, `png`, `pdf`, and `jpg` when using the Matplotlib backend, and `svg` when using the SVG backend. A vector format such as `svg` is recommended for best image quality. .. code-block:: python with schemdraw.Drawing(file='my_circuit.svg') as d: ... The Drawing may also be saved using the :py:meth:`schemdraw.Drawing.save` method.
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/usage/start.rst
0.953221
0.68635
start.rst
pypi
.. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import elements as elm .. _placement: Placing Elements ================ Elements are added to a Drawing using the `add` method or `+=` shortcut. The Drawing maintains a current position and direction, such that the default placement of the next element will start at the end of the previous element, going in the same direction. .. jupyter-execute:: with schemdraw.Drawing() as d: d += elm.Capacitor() d += elm.Resistor() d += elm.Diode() If a direction method (`up`, `down`, `left`, `right`) is added to an element, the element is rotated in that direction, and future elements take the same direction: .. jupyter-execute:: with schemdraw.Drawing() as d: d += elm.Capacitor() d += elm.Resistor().up() d += elm.Diode() The `theta` method can be used to specify any rotation angle in degrees. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Resistor().theta(20).label('R1') d += elm.Resistor().label('R2') # Takes position and direction from R1 .. jupyter-execute:: :hide-code: d.draw() Anchors ------- All elements have a set of predefined "anchor" positions within the element. For example, a bipolar transistor has `base`, `emitter`, and `collector` anchors. All two-terminal elements have anchors named `start`, `center`, and `end`. The docstring for each element lists the available anchors. Once an element is added to the drawing, all its anchor positions will be added as attributes to the element object, so the base position of transistor assigned to variable `Q` may be accessed via `Q.base`. Rather than working in absolute (x, y) coordinates, anchors can be used to set the position of new elements. Using the `at` method, one element can be placed starting on the anchor of another element. For example, to draw an opamp and place a resistor on the output, store the Opamp instance to a variable. 
Then call the `at` method of the new element passing the `Opamp.out` anchor. After the resistor is drawn, the current drawing position is moved to the endpoint of the resistor. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: opamp = d.add(elm.Opamp()) d.add(elm.Resistor().right().at(opamp.out)) .. jupyter-execute:: :hide-code: d.draw() Python's walrus operator provides a convenient shorthand notation for adding an element using `+=` and storing it at the same time. The above code can be written equivalently as: .. code-block:: python d += (opamp := elm.Opamp()) d += elm.Resistor().right().at(opamp.out) The second purpose for anchors is aligning new elements with respect to existing elements. Suppose a resistor has just been placed, and now an Opamp should be connected to the resistor. The `anchor` method tells the Drawing which input on the Opamp should align with resistor. Here, an Opamp is placed at the end of a resistor, connected to the opamp's `in1` anchor (the inverting input). .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Resistor().label('R1') d += elm.Opamp().anchor('in1') .. jupyter-execute:: :hide-code: d.draw() Compared to anchoring the opamp at `in2` (the noninverting input): .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Resistor().label('R2') d += elm.Opamp().anchor('in2') .. jupyter-execute:: :hide-code: d.draw() Dimensions ---------- The inner zig-zag portion of a resistor has length of 1 unit, while the default lead extensions are 1 unit on each side, making the default total resistor length 3 units. Placement methods such as `at` and `to` accept a tuple of (x, y) position in these units. .. 
jupyter-execute:: :hide-code: with schemdraw.Drawing() as d: d += elm.Resistor() d += elm.Line(arrow='|-|').at((1, .7)).to((2, .7)).label('1.0').color('royalblue') d += elm.Line(arrow='|-|').at((0, -.7)).to((3, -.7)).label('Drawing.unit', 'bottom').color('royalblue') This default 2-terminal length can be changed using the `unit` parameter to the :py:meth:`schemdraw.Drawing.config` method: .. code-block:: python with schemdraw.Drawing() as d: d.config(unit=2) ... .. jupyter-execute:: :hide-code: with schemdraw.Drawing() as d: d.config(unit=2) d += elm.Resistor() d += elm.Line(arrow='|-|').at((.5, .7)).to((1.5, .7)).label('1.0').color('royalblue') d += elm.Line(arrow='|-|').at((0, -.7)).to((2, -.7)).label('Drawing.unit', 'bottom').color('royalblue') Two-Terminal Elements --------------------- In Schemdraw, a "Two-Terminal Element" is any element that can grow to fill a given length (this includes elements such as the Potentiometer, even though it electrically has three terminals). All two-terminal elements subclass :py:class:`schemdraw.elements.Element2Term`. They have some additional methods for setting placement and length. The `length` method sets an exact length for a two-terminal element. Alternatively, the `up`, `down`, `left`, and `right` methods on two-terminal elements take a length parameter. .. jupyter-execute:: :emphasize-lines: 5 with schemdraw.Drawing() as d: d += elm.Dot() d += elm.Resistor() d += elm.Dot() d += elm.Diode().length(6) d += elm.Dot() The `to` method will set an exact endpoint for a 2-terminal element. The starting point is still the ending location of the previous element. Notice the Diode is stretched longer than the standard element length in order to fill the diagonal distance. .. 
jupyter-execute:: :emphasize-lines: 4 with schemdraw.Drawing() as d: R = d.add(elm.Resistor()) C = d.add(elm.Capacitor().up()) Q = d.add(elm.Diode().to(R.start)) The `tox` and `toy` methods are useful for placing 2-terminal elements to "close the loop", without requiring an exact length. They extend the element horizontally or vertically to the x- or y- coordinate of the anchor given as the argument. These methods automatically change the drawing direction. Here, the Line element does not need to specify an exact length to fill the space and connect back with the Source. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: :emphasize-lines: 9 d += (C := elm.Capacitor()) d += elm.Diode() d += elm.Line().down() # Now we want to close the loop, but can use `tox` # to avoid having to know exactly how far to go. # The Line will extend horizontally to the same x-position # as the Capacitor's `start` anchor. d += elm.Line().tox(C.start) # Now close the loop by relying on the fact that all # two-terminal elements (including Source and Line) # are the same length by default d += elm.Source().up() .. jupyter-execute:: :hide-code: d.draw() Finally, exact endpoints can also be specified using the `endpoints` method. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: :emphasize-lines: 5 d += (R := elm.Resistor()) d += (Q := elm.Diode().down(6)) d += elm.Line().tox(R.start) d += elm.Capacitor().toy(R.start) d += elm.SourceV().endpoints(Q.end, R.start) .. jupyter-execute:: :hide-code: d.draw() Orientation ----------- The `flip` and `reverse` methods are useful for changing orientation of directional elements such as Diodes, but they do not affect the drawing direction. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :hide-output: d += elm.Zener().label('Normal') d += elm.Zener().flip().label('Flip') d += elm.Zener().reverse().label('Reverse') .. 
jupyter-execute:: :hide-code: d.draw() Drawing State ------------- The :py:class:`schemdraw.Drawing` maintains a drawing state that includes the current x, y position, stored in the `Drawing.here` attribute as a (x, y) tuple, and drawing direction stored in the `Drawing.theta` attribute. A LIFO stack of drawing states can be used, via the :py:meth:`schemdraw.Drawing.push` and :py:meth:`schemdraw.Drawing.pop` method, for situations when it's useful to save the drawing state and come back to it later. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :emphasize-lines: 4,10 d += elm.Inductor() d += elm.Dot() print('d.here:', d.here) d.push() # Save this drawing position/direction for later d += elm.Capacitor().down() # Go off in another direction temporarily d += elm.Ground(lead=False) print('d.here:', d.here) d.pop() # Return to the pushed position/direction print('d.here:', d.here) d += elm.Diode() d.draw() Changing the drawing position can be accomplished by calling :py:meth:`schemdraw.Drawing.move` or :py:meth:`schemdraw.Drawing.move_from`. Drop and Hold Methods ********************* To place an element without moving the drawing position, use the :py:meth:`schemdraw.elements.Element.hold` method. The element will be placed without changing the drawing state. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :emphasize-lines: 5 d += elm.Diode() # Normal placement: drawing position moves to end of element d += elm.Dot().color('red') d.here = (0, -1) d += elm.Diode().hold() # Hold method prevents position from changing d += elm.Dot().color('blue') .. jupyter-execute:: :hide-code: d.draw() Three-terminal elements do not necessarily leave the drawing position where desired, so after drawing an element, the current drawing position can be set using the :py:meth:`schemdraw.elements.Element.drop` method to specify an anchor at which to place the cursor. 
This reduces the need to assign every element to a variable name. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: :emphasize-lines: 5 d += elm.BjtNpn() d += elm.Resistor().label('R1') d.here = (5, 0) d += elm.BjtNpn().drop('emitter') d += elm.Resistor().label('R2') .. jupyter-execute:: :hide-code: d.draw() Connecting Elements ------------------- Typically, the :py:class:`schemdraw.elements.lines.Line` element is used to connect elements together. More complex line routing requires multiple Line elements. The :py:class:`schemdraw.elements.lines.Wire` element is used as a shortcut for placing multiple connecting lines at once. The Wire element connects the start and end points based on its `shape` parameter. The `k` parameter is used to set the distance before the wire first changes direction. .. list-table:: Wire Shape Parameters :widths: 25 50 :header-rows: 1 * - Shape Parameter - Description * - `-` - Direct Line * - `-\|` - Horizontal then vertical * - `\|-` - Vertical then horizontal * - `n` - Vertical-horizontal-vertical (like an n or u) * - `c` - Horizontal-vertical-horizontal (like a c or ↄ) * - `z` - Horizontal-diagonal-horizontal * - `N` - Vertical-diagonal-vertical .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() d += (A := elm.Dot().label('A', halign='right', ofst=(-.1, 0))) d += (B := elm.Dot().label('B').at((4, 4))) d += (C := elm.Dot().label('C', ofst=(-.2, 0)).at((7, 4))) d += (D := elm.Dot().label('D', ofst=(-.2, 0)).at((9, 0))) d += (E := elm.Dot().label('E', ofst=(-.2, 0)).at((11, 4))) d += (F := elm.Dot().label('F', ofst=(-.2, 0)).at((13, 0))) .. 
jupyter-execute:: d += elm.Wire('-', arrow='->').at(A.center).to(B.center).color('deeppink').label('"-"') d += elm.Wire('|-', arrow='->').at(A.center).to(B.center).color('mediumblue').label('"|-"') d += elm.Wire('-|', arrow='->').at(A.center).to(B.center).color('darkseagreen').label('"-|"') d += elm.Wire('c', k=-1, arrow='->').at(C.center).to(D.center).color('darkorange').label('"c"', halign='left') d += elm.Wire('n', arrow='->').at(C.center).to(D.center).color('orchid').label('"n"') d += elm.Wire('N', arrow='->').at(E.center).to(F.center).color('darkred').label('"N"', 'start', ofst=(-.1, -.75)) d += elm.Wire('z', k=.5, arrow='->').at(E.center).to(F.center).color('teal').label('"z"', halign='left', ofst=(0, .5)) .. jupyter-execute:: :hide-code: d.draw() Both `Line` and `Wire` elements take an `arrow` parameter, a string specification of arrowhead types at the start and end of the wire. The arrow string may contain "<", ">", for arrowheads, "\|" for an endcap, and "o" for a dot. Some examples are shown below: .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: d += elm.Line(arrow='->').label('"->"', 'right') d += elm.Line(arrow='<-').at((0, -.75)).label('"<-"', 'right') d += elm.Line(arrow='<->').at((0, -1.5)).label('"<->"', 'right') d += elm.Line(arrow='|->').at((0, -2.25)).label('"|->"', 'right') d += elm.Line(arrow='|-o').at((0, -3.0)).label('"|-o"', 'right') .. jupyter-execute:: :hide-code: d.draw() Because dots are used to show connected wires, all two-terminal elements have `dot` and `idot` methods for quickly adding a dot at the end or beginning of the element, respectively. .. jupyter-execute:: elm.Resistor().dot() Keyword Arguments ----------------- All :py:class:`schemdraw.elements.Element` types take keyword arguments that can also be used to set element properties, partly for historical reasons but also for easy element setup via dictionary unpacking. The keyword arguments are equivalent to calling the Element setup methods. 
The keyword arguments are not validated or type checked, so the chained method interface described above is recommended for configuring elements. +--------------------+-------------------------------+ | Keyword Argument | Method Equivalent | +====================+===============================+ | `d='up'` | `.up()` | +--------------------+-------------------------------+ | `d='down'` | `.down()` | +--------------------+-------------------------------+ | `d='left'` | `.left()` | +--------------------+-------------------------------+ | `d='right'` | `.right()` | +--------------------+-------------------------------+ | `theta=X` | `.theta(X)` | +--------------------+-------------------------------+ | `at=X` or `xy=X` | `.at(X)` | +--------------------+-------------------------------+ | `flip=True` | `.flip()` | +--------------------+-------------------------------+ | `reverse=True` | `.reverse()` | +--------------------+-------------------------------+ | `anchor=X` | `.anchor(X)` | +--------------------+-------------------------------+ | `zoom=X` | `.scale(X)` | +--------------------+-------------------------------+ | `color=X` | `.color(X)` | +--------------------+-------------------------------+ | `fill=X` | `.fill(X)` | +--------------------+-------------------------------+ | `ls=X` | `.linestyle(X)` | +--------------------+-------------------------------+ | `lw=X` | `.linewidth(X)` | +--------------------+-------------------------------+ | `zorder=X` | `.zorder(X)` | +--------------------+-------------------------------+ | `move_cur=False` | `.hold()` | +--------------------+-------------------------------+ | `label=X` | `.label(X)` | +--------------------+-------------------------------+ | `botlabel=X` | `.label(X, loc='bottom')` | +--------------------+-------------------------------+ | `lftlabel=X` | `.label(X, loc='left')` | +--------------------+-------------------------------+ | `rgtlabel=X` | `.label(X, loc='right')` | 
+--------------------+-------------------------------+ | `toplabel=X` | `.label(X, loc='top')` | +--------------------+-------------------------------+ | `lblloc=X` | `.label(..., loc=X)` | +--------------------+-------------------------------+
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/usage/.ipynb_checkpoints/placement-checkpoint.rst
0.887552
0.699857
placement-checkpoint.rst
pypi
Customizing Elements ==================== .. jupyter-execute:: :hide-code: %config InlineBackend.figure_format = 'svg' import schemdraw from schemdraw import elements as elm from schemdraw import logic from schemdraw.segments import * Grouping Elements ----------------- If a set of circuit elements are to be reused multiple times, they can be grouped into a single element. Create and populate a drawing, but set `show=False`. Instead, use the Drawing to create a new :py:class:`schemdraw.elements.ElementDrawing`, which converts the drawing into an element instance to add to other drawings. .. jupyter-execute:: :emphasize-lines: 8-10 with schemdraw.Drawing(show=False) as d1: d1 += elm.Resistor() d1.push() d1 += elm.Capacitor().down() d1 += elm.Line().left() d1.pop() with schemdraw.Drawing() as d2: # Add a second drawing for i in range(3): d2 += elm.ElementDrawing(d1) # Add the first drawing to it 3 times .. _customelements: Defining custom elements ------------------------ All elements are subclasses of :py:class:`schemdraw.elements.Element` or :py:class:`schemdraw.elements.Element2Term`. For elements consisting of several other already-defined elements (like a relay), :py:class:`schemdraw.elements.compound.ElementCompound` can be used for easy combining of multiple elements. Subclasses only need to define the `__init__` method in order to add lines, shapes, and text to the new element, all of which are defined using :py:class:`schemdraw.segments.Segment` classes. New Segments should be appended to the `Element.segments` attribute list. Coordinates are all defined in element coordinates, where the element begins at (0, 0) and is drawn from left to right. The drawing engine will rotate and translate the element to its final position, and for two-terminal elements deriving from Element2Term, will add lead extensions to the correct length depending on the element's placement parameters. 
Therefore elements deriving from Element2Term should not define the lead extensions (e.g. a Resistor only defines the zig-zag portion). A standard resistor is 1 drawing unit long, and with default lead extension will become 3 units long. Segments include :py:class:`schemdraw.segments.Segment`, :py:class:`schemdraw.segments.SegmentPoly`, :py:class:`schemdraw.segments.SegmentCircle`, :py:class:`schemdraw.segments.SegmentArc`, :py:class:`schemdraw.segments.SegmentText`, and :py:class:`schemdraw.segments.SegmentBezier`. The subclassed `Element.__init__` method can be defined with extra parameters to help define the element options. In addition to the list of Segments, any named anchors and other parameters should be specified. Anchors should be added to the `Element.anchors` dictionary as {name: (x, y)} key/value pairs. The Element instance maintains its own parameters dictionary in `Element.params` that override the default drawing parameters. Parameters are resolved by a ChainMap of user arguments to the `Element` instance, the `Element.params` attribute, then the `schemdraw.Drawing` parameters, in that order. A common use of setting `Element.params` in the setup function is to change the default position of text labels, for example Transistor elements apply labels on the right side of the element by default, so they add to the setup: .. code-block:: self.params['lblloc'] = 'rgt' The user can still override this label position by creating, for example, `Transistor().label('Q1', loc='top')`. As an example, here's the definition of our favorite element, the resistor: .. code-block:: python class Resistor(Element2Term): def __init__(self, *d, **kwargs): super().__init__(*d, **kwargs) self.segments.append(Segment([(0, 0), (0.5*reswidth, resheight), (1.5*reswidth, -resheight), (2.5*reswidth, resheight), (3.5*reswidth, -resheight), (4.5*reswidth, resheight), (5.5*reswidth, -resheight), (6*reswidth, 0)])) The resistor is made of one path. 
`reswidth` and `resheight` are constants that define the height and width of the resistor zigzag (and are referenced by several other elements too). Browse the source code in the `Schemdraw.elements` submodule to see the definitions of the other built-in elements. Flux Capacitor Example ^^^^^^^^^^^^^^^^^^^^^^ For an example, let's make a flux capacitor circuit element. Since everyone knows a flux-capacitor has three branches, we should subclass the standard :py:class:`schemdraw.elements.Element` class instead of :py:class:`schemdraw.elements.Element2Term`. Start by importing the Segments and define the class name and `__init__` function: .. code-block:: python from schemdraw.segments import * class FluxCapacitor(Element): def __init__(self, *d, **kwargs): super().__init__(*d, **kwargs) The `d` and `kwargs` are passed to `super` to initialize the Element. We want a dot in the center of our flux capacitor, so start by adding a `SegmentCircle`. The `fclen` and `radius` variables could be set as arguments to the __init__ for the user to adjust, if desired, but here they are defined as constants in the __init__. .. code-block:: python fclen = 0.5 radius = 0.075 self.segments.append(SegmentCircle((0, 0), radius)) Next, add the paths as Segment instances, which are drawn as lines. The flux capacitor will have three paths, all extending from the center dot: .. code-block:: python self.segments.append(Segment([(0, 0), (0, -fclen*1.41)])) self.segments.append(Segment([(0, 0), (fclen, fclen)])) self.segments.append(Segment([(0, 0), (-fclen, fclen)])) And at the end of each path is an open circle. Append three more `SegmentCircle` instances. By specifying `fill=None` the SegmentCircle will always remain unfilled regardless of any `fill` arguments provided to `Drawing` or `FluxCapacitor`. .. 
code-block:: python self.segments.append(SegmentCircle((0, -fclen*1.41), 0.2, fill=None)) self.segments.append(SegmentCircle((fclen, fclen), 0.2, fill=None)) self.segments.append(SegmentCircle((-fclen, fclen), 0.2, fill=None)) Finally, we need to define anchor points so that other elements can be connected to the right places. Here, they're called `p1`, `p2`, and `p3` for lack of better names (what do you call the inputs to a flux capacitor?) Add these to the `self.anchors` dictionary. .. code-block:: python self.anchors['p1'] = (-fclen, fclen) self.anchors['p2'] = (fclen, fclen) self.anchors['p3'] = (0, -fclen*1.41) Here's the Flux Capacitor class all in one: .. jupyter-execute:: class FluxCapacitor(elm.Element): def __init__(self, *d, **kwargs): super().__init__(*d, **kwargs) radius = 0.075 fclen = 0.5 self.segments.append(SegmentCircle((0, 0), radius)) self.segments.append(Segment([(0, 0), (0, -fclen*1.41)])) self.segments.append(Segment([(0, 0), (fclen, fclen)])) self.segments.append(Segment([(0, 0), (-fclen, fclen)])) self.segments.append(SegmentCircle((0, -fclen*1.41), 0.2, fill=None)) self.segments.append(SegmentCircle((fclen, fclen), 0.2, fill=None)) self.segments.append(SegmentCircle((-fclen, fclen), 0.2, fill=None)) self.anchors['p1'] = (-fclen, fclen) self.anchors['p2'] = (fclen, fclen) self.anchors['p3'] = (0, -fclen*1.41) Try it out: .. jupyter-execute:: FluxCapacitor() Segment objects --------------- After an element is added to a drawing, the :py:class:`schemdraw.segments.Segment` objects defining it are accessible in the `segments` attribute list of the Element. For even more control over customizing individual pieces of an element, the parameters of a Segment can be changed. .. jupyter-execute:: :hide-code: d = schemdraw.Drawing() .. jupyter-execute:: d += (n := logic.Nand()) n.segments[1].color = 'red' n.segments[1].zorder = 5 # Put the bubble on top .. 
jupyter-execute:: :hide-code: d.draw() Matplotlib axis --------------- When using the Matplotlib backend (the default), a final customization option is to use the Matplotlib figure and add to it. A :py:class:`schemdraw.Figure` is returned from the `draw` method, which contains `fig` and `ax` attributes holding the Matplotlib figure. .. jupyter-execute:: :emphasize-lines: 4-5 schemdraw.use('matplotlib') d = schemdraw.Drawing() d.add(elm.Resistor()) schemfig = d.draw() schemfig.ax.axvline(.5, color='purple', ls='--') schemfig.ax.axvline(2.5, color='orange', ls='-', lw=3); display(schemfig)
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/usage/.ipynb_checkpoints/customizing-checkpoint.rst
0.897803
0.693447
customizing-checkpoint.rst
pypi
Getting Started =============== Installation ------------ schemdraw can be installed from pip using .. code-block:: bash pip install schemdraw or to include optional ``matplotlib`` backend dependencies: .. code-block:: bash pip install schemdraw[matplotlib] To allow the SVG drawing :ref:`backends` to render math expressions, install the optional `ziamath <https://ziamath.readthedocs.io>`_ dependency with: .. code-block:: bash pip install schemdraw[svgmath] Alternatively, schemdraw can be installed directly by downloading the source and running .. code-block:: bash pip install ./ Schemdraw requires Python 3.8 or higher. Overview --------- The :py:mod:`schemdraw` module allows for drawing circuit elements. :py:mod:`schemdraw.elements` contains :ref:`electrical` pre-defined for use in a drawing. A common import structure is: .. jupyter-execute:: import schemdraw import schemdraw.elements as elm To make a circuit diagram, a :py:class:`schemdraw.Drawing` is created and :py:class:`schemdraw.elements.Element` instances are added to it: .. jupyter-execute:: with schemdraw.Drawing() as d: d.add(elm.Resistor()) d.add(elm.Capacitor()) d.add(elm.Diode()) The `+=` operator may be used as shorthand notation to add elements to the drawing. This code is equivalent to the above: .. code-block:: python with schemdraw.Drawing() as d: d += elm.Resistor() d += elm.Capacitor() d += elm.Diode() Element placement and other properties are set using a chained method interface, for example: .. jupyter-execute:: with schemdraw.Drawing() as d: d += elm.Resistor().label('100KΩ') d += elm.Capacitor().down().label('0.1μF', loc='bottom') d += elm.Line().left() d += elm.Ground() d += elm.SourceV().up().label('10V') Methods `up`, `down`, `left`, `right` specify the drawing direction, and `label` adds text to the element. If not specified, elements reuse the same direction from the previous element, and begin where the previous element ended. 
Using the `with` context manager is a convenience, letting the drawing be displayed and saved upon exiting the `with` block. Schematics may also be created simply by assigning a new Drawing instance, but this requires calling `draw()` and/or `save()` explicitly: .. code-block:: python d = schemdraw.Drawing() d += elm.Resistor() ... d.draw() d.save('my_circuit.svg') For full details of placing and stylizing elements, see :ref:`placement` and :py:class:`schemdraw.elements.Element`. In general, parameters that control **what** is drawn are passed to the element itself, and parameters that control **how** things are drawn are set using chained Element methods. For example, to make a polarized Capacitor, pass `polar=True` as an argument to `Capacitor`, but to change the Capacitor's color, use the `.color()` method: `elm.Capacitor(polar=True).color('red')`. Viewing the Drawing ------------------- Jupyter ******* When run in a Jupyter notebook, the schematic will be drawn to the cell output after the `with` block is exited. If your schematics pop up in an external window, and you are using the Matplotlib backend, set Matplotlib to inline mode before importing schemdraw: .. code-block:: python %matplotlib inline For best results when viewing circuits in the notebook, use a vector figure format, such as svg before importing schemdraw: .. code-block:: python %config InlineBackend.figure_format = 'svg' Python Scripts and GUI/Web apps ******************************* If run as a Python script, the schematic will be opened in a pop-up window after the `with` block exits. Add the `show=False` option when creating the Drawing to suppress the window from appearing. .. code-block:: python with schemdraw.Drawing(show=False) as d: ... The raw image data as a bytes array can be obtained by calling `.get_imagedata()` after the `with` block exits. This can be useful for integrating schemdraw into an existing GUI or web application. .. 
code-block:: python with schemdraw.Drawing() as drawing: ... image_bytes = drawing.get_imagedata('svg') Headless Servers **************** When running on a server, sometimes there is no display available. The code may attempt to open the GUI preview window and fail. In these cases, try setting the Matplotlib backend to a non-GUI option. Before importing schemdraw, add these lines to use the Agg backend which does not have a GUI. Then get the drawing using `d.get_imagedata()`, or `d.save()` to get the image. .. code-block:: python import matplotlib matplotlib.use('Agg') # Set Matplotlib's backend here Alternatively, use Schemdraw's SVG backend (see :ref:`backends`). Saving Drawings --------------- To save the schematic to a file, add the `file` parameter when setting up the Drawing. The image type is determined from the file extension. Options include `svg`, `eps`, `png`, `pdf`, and `jpg` when using the Matplotlib backend, and `svg` when using the SVG backend. A vector format such as `svg` is recommended for best image quality. .. code-block:: python with schemdraw.Drawing(file='my_circuit.svg') as d: ... The Drawing may also be saved using the :py:meth:`schemdraw.Drawing.save` method.
/schemdraw-0.17.tar.gz/schemdraw-0.17/docs/usage/.ipynb_checkpoints/start-checkpoint.rst
0.953221
0.68635
start-checkpoint.rst
pypi
# Installation # In your terminal (vagrant), do: ```bash cd [repo]/protected/config cp db.json.sample db.json cd [repo]/protected/schema virtualenv env . env/bin/activate pip install -r requirements.txt ``` Next time, when you want to run schemup: ```bash . env/bin/activate python update.py commit ``` # General # Schemup versions a database on a per-table basis. This means that table X can be at version 1, while table Y can be at version 2. All versioning data is stored in a special table called `schemup_tables`. This table keeps other (versioned) tables' schema history, including what their latest schemas should look like (somewhat similar to git history). Schemup provides 2 main features: validation (schemas synchronization checking), and migration (schemas updating). # Version declaration # This is basically just a map that states what version each table should be at. There are a couple of convenient helpers to build this map. ## Storm ORM This is achieved by using a decorator, and adding a special attribute `__version__` to model class declarations. ```python from storm.locals import * from schemup.orms import storm # Pass this to validate/upgrade commands. It should be a global # shared among model files, if there are several of them stormSchema = storm.StormSchema() @stormSchema.versioned class User(Storm): __storm_table__ = "user" __version__ = "knn_1" ``` ## JSON file Keep the map in a json file. **`versions.json`** ```json { "users": "nta_6", "message": "ntd_9" } ``` **`update.py`** ```python class DictSchema(object): def __init__(self, path): self.versions = json.load(open(path, "r")) def getExpectedTableVersions(self): return sorted(self.versions.iteritems()) # Pass this to validate/upgrade commands dictSchema = DictSchema("versions.json") ``` # Validation # Schemup helps keeping track, for each table, of the synchronization between 3 things: - The desired schema, declared in code, or data file (actually only version, no table structure). 
- The journaled schema (cached schema, recorded schema) in `schemup_tables` (both version and table structure). - The actual DB schema (table structure only, obviously). Full validation happens in 2 steps: ## Checking recorded schema vs. desired schema (version mismatches) ## This is done by simply comparing the versions declared in code with the latest version recorded in `schemup_tables`. Note that there is not (yet) an actual schema comparison. Out-of-sync tables detected by this validation indicate that the current schema in `schemup_tables` (and thus the actual schema, provided that they are in sync) need to be brought up-to-date with the desired schema (using Schemup migration feature). ## Checking recorded schema vs. actual schema (schema mismatches) ## This is done by getting the schema information from the DB (e.g. `information_schema.tables`), and comparing them against the last recorded schema in `schemup_tables`. Mismatches detected by this validation usually mean the schema was changed outside of Schemup's control, which should be avoided. ```python from schemup import validator from warp import runtime conn = runtime.store.get_database().raw_connect() dbSchema = postgres.PostgresSchema(conn) errors = validator.findSchemaMismatches(dbSchema) if errors: print "Schema mismatches, was the schema changed outside Schemup?" ``` # Migration # Schemup migration feature attempts to bring the real schema (and `schemup_tables`) up-to-date with the current ORM schema, by applying a series of "upgraders". Each upgrader is responsible for bringing a table from one version to another, using an upgrading function that will be run on the DB schema. An upgrader also has dependencies, which are the required versions of some tables before it can be run. For example, a foreign key referencing a table can only be added after the table is created. There are 2 types of upgraders: those created from decorated Python functions, and those loaded from YAML files. 
There is a command to load both types from files under a directory. ```python from schemup import commands # Load upgraders from .py & .yaml files under "migration" directory commands.load("migrations") ``` After getting all the necessary upgraders, the `upgrade` command can be used to carry out the migration. ```python from schemup import commands from warp import runtime from models import stormSchema conn = runtime.store.get_database().raw_connect() dbSchema = postgres.PostgresSchema(conn) commands.upgrade(dbSchema, stormSchema) ``` ## Python upgrading functions ## Note that the logic used by these functions must be immutable over time. Therefore application logic (functions, orm classes...) from other module must not be used directly, but copied for use only in the migrations; otherwise the migrations will be broken once application logic changes. ```python from schemup.upgraders import upgrader @upgrader('user', 'bgh_2', 'bgh_3') def user_add_email(dbSchema): dbSchema.execute("ALTER TABLE user ADD email VARCHAR") # Or running arbitrary code here @upgrader('order', None, 'knn_1', dependencies=[('user', 'bgh_1')]) def order_create(dbSchema): dbSchema.execute(""" CREATE TABLE order ( id integer NOT NULL PRIMARY KEY, user_id integer NOT NULL, CONSTRAINT order_user_id FOREIGN KEY (user_id) REFERENCES user(id) ) """) ``` ## Upgraders loaded from YAML files ## One file can contain multiple blocks delineated by `---`. Each block corresponds to an upgrader. If a block's `from` key is omitted, it defaults to the previous block's `to` key. 
### One table per file ### **`user.yaml`** ```yaml --- # Another upgrader --- table: user from: bgh_2 to: bgh_3 sql: | ALTER TABLE user ADD email VARCHAR --- # Another upgrader ``` **`order.yaml`** ```yaml --- table: order from: null to: knn_1 depends: - [ user, bgh_1 ] sql: | CREATE TABLE order ( id integer NOT NULL PRIMARY KEY, user_id integer NOT NULL, CONSTRAINT order_user_id FOREIGN KEY (user_id) REFERENCES user(id) ) ``` ### One feature per file ### **`feature.add-rule-table.yaml`** ```yaml --- table: questionnaire_rule from: null to: nta_1 depends: - [questionnaire, nta_2] sql: | CREATE TABLE questionnaire_rule ( id SERIAL NOT NULL PRIMARY KEY, created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NOW(), issue TEXT, requires TEXT[2][], recommendations INTEGER[], questionnaire_id INTEGER NOT NULL REFERENCES questionnaire(id) ON DELETE RESTRICT ); --- table: questionnaire from: nta_3 to: nta_4 depends: - [questionnaire_rule, nta_2] sql: | ALTER TABLE questionnaire DROP COLUMN rules; ``` # Snapshotting # ## Whole schema ## Use this when you have an existing database whose schema changes need to be kept track of with Schemup. - Add version declarations. - Add correct schema migrations. This ensures that a new instance can be created from scratch. If there is not enough time, a workaround can be used: put the schema dump in one of the migrations, leaving the rest of the migrations no-op (e.g. `SELECT 1;`). For example: ```yaml --- table: users from: null to: nta_1 sql: | # The whole schema here --- table: message from: null to: nta_1 sql: | SELECT 1; # Other tables ``` - Use the `snapshot` command. 
```python from schemup.dbs import postgres from schemup import commands from warp.runtime import store conn = store.get_database().raw_connect() dbSchema = postgres.PostgresSchema(conn) commands.snapshot(dbSchema, stormSchema) ``` ## Single table (aka I mistakenly changed the schema in SQL shell) ## Use this when you mistakenly change a table's schema outside of schemup (e.g. trying out DDL in SQL shell without rolling back the transaction). This creates a schema mismatch. ```python from warp.common.schema import makeSchema from warp.runtime import store schema = makeSchema(store) schema.setSchema("recommendation", "nta_5") schema.commit() ``` # Workflow # - When adding to an existing DB, use snapshotting. - When starting from scratch, provide upgraders with `from` equal to `None` (python) or `null` (yaml). - Version naming convention: programmer initials and integer id. Example: `bgh_1`, `bgh_2`, `knn_3`, `nta_4`, `knn_5`. - Migration organization: one-feature-per-file is preferred; initial schema can be in its own file. ## Upgraders ## - When there are schema changes, bump model classes' `__version__`. - Put upgraders under `migrations` directory. Upgraders can be yaml files, or python files containing upgrader-decorated functions. - Test the migration manually on a dev DB. - Remember that Postgres DDL is transactional. Therefore it is a good idea to try out migration DDL in Postgres shell, wrapped in a transaction that will be rolled back. ```sql START TRANSACTION; -- Try CREATE TABLE, ALTER TABLE... here ROLLBACK; ``` ## Migration ## - Back up the DB before doing migration. 
- Migration steps ```python from schemup.dbs import postgres from schemup import commands from warp.runtime import store # Get current table versions, by ORM from models import stormSchema # Get schema conn = store.get_database().raw_connect() dbSchema = postgres.PostgresSchema(conn) # Make sure the current DB is not "dirty" validator.findSchemaMismatches(dbSchema) # Load upgraders commands.load("migrations") # Do upgrade commands.upgrade(schema, stormSchema) # Check if the schemas are in sync commands.validate(runtime.schema, stormSchema) ``` ## Shared dev machine ## Schemup works on a forward-only, no-branching (directed acyclic graph) basis. This creates a problem in using shared dev machines: - Suppose the main branch is at `user:a_1`, `message:b_1`. - Developer A adds migration `user:a_1` to `user:a_2` on his topic branch and tests it on dev. - Developer B adds migration `message:b_1` to `message:b_2` and wants to test it on dev. He checks out his branch and runs the migration. Because `user` is at `a_2`, but the code wants it to be at `a_1`, schemup tries migrating `user` from `a_2` to `a_1` and fails not knowing how. The best solution is to ensure that the DB's schema is the same before and after you test the code with new schema. For example: - Make a dump of the whole database before running schema migration. - Switch back to the branch the code was on previously after testing the new code. - Replace the current state of the database with the dump. ## Snapshot-less application of schemup to existing DB ## This method was proposed by Duy. The idea is to use a dump as the DB's initial state, instead of a blank DB. The process looks like: - Start with no migrations, blank version declarations. - New instances are provisioned by the initial dump instead of just a blank DB. - Continue as normal. - New migrations should be written with the non-blank initial DB's state in mind. 
For example if the dump already contains a table `user`, its migrations should look like: ```yaml --- table: user from: null to: lmd_1 sql: | ALTER TABLE user ADD COLUMN age INTEGER DEFAULT NULL; ``` and not ```yaml --- table: user from: null to: lmd_1 sql: | CREATE TABLE user ( # ... ) --- table: user from: lmd_1 to: lmd_2 sql: | ALTER TABLE user ADD COLUMN age INTEGER DEFAULT NULL; ```
/schemup-1.0.1.zip/schemup-1.0.1/README.md
0.675978
0.727806
README.md
pypi
2.1.1 / 2021-08-17 ================== - Update error message for incorrect choices field `#572 <https://github.com/schematics/schematics/pull/572>`__ (`begor <https://github.com/begor>`__) - Avoid some deprecation warnings when using Python 3 `#576 <https://github.com/schematics/schematics/pull/576>`__ (`jesuslosada <https://github.com/jesuslosada>`__) - Fix EnumType enums with value=0 not working with use_values=True `#594 <https://github.com/schematics/schematics/pull/594>`__ (`nikhilgupta345 <https://github.com/nikhilgupta345>`__) - Fix syntax warning over comparison of literals using is. `#611 <https://github.com/schematics/schematics/pull/611>`__ (`tirkarthi <https://github.com/tirkarthi>`__) - Add help text generation capability to Models `#543 <https://github.com/schematics/schematics/pull/543>`__ (`MartinHowarth <https://github.com/MartinHowarth>`__) - Update documentation `#578 <https://github.com/schematics/schematics/pull/578>`__ (`BobDu <https://github.com/BobDu>`__) `#604 <https://github.com/schematics/schematics/pull/604>`__ (`BryanChan777 <https://github.com/BryanChan777>`__) `#605 <https://github.com/schematics/schematics/pull/605>`__ (`timgates42 <https://github.com/timgates42>`__) `#608 <https://github.com/schematics/schematics/pull/608>`__ (`dasubermanmind <https://github.com/dasubermanmind>`__) - Add test coverage for model validation inside Dict/List `#588 <https://github.com/schematics/schematics/pull/588>`__ (`borgstrom <https://github.com/borgstrom>`__) - Added German translation `#614 <https://github.com/schematics/schematics/pull/614>`__ (`hkage <https://github.com/hkage>`__) 2.1.0 / 2018-06-25 ================== **[BREAKING CHANGE]** - Drop Python 2.6 support `#517 <https://github.com/schematics/schematics/pull/517>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) Other changes: - Add TimedeltaType `#540 <https://github.com/schematics/schematics/pull/540>`__ (`gabisurita <https://github.com/gabisurita>`__) - Allow to create 
Model fields dynamically `#512 <https://github.com/schematics/schematics/pull/512>`__ (`lkraider <https://github.com/lkraider>`__) - Allow ModelOptions to have extra parameters `#449 <https://github.com/schematics/schematics/pull/449>`__ (`rmb938 <https://github.com/rmb938>`__) `#506 <https://github.com/schematics/schematics/pull/506>`__ (`ekampf <https://github.com/ekampf>`__) - Accept callables as serialize roles `#508 <https://github.com/schematics/schematics/pull/508>`__ (`lkraider <https://github.com/lkraider>`__) (`jaysonsantos <https://github.com/jaysonsantos>`__) - Simplify PolyModelType.find_model for readability `#537 <https://github.com/schematics/schematics/pull/537>`__ (`kstrauser <https://github.com/kstrauser>`__) - Enable PolyModelType recursive validation `#535 <https://github.com/schematics/schematics/pull/535>`__ (`javiertejero <https://github.com/javiertejero>`__) - Documentation fixes `#509 <https://github.com/schematics/schematics/pull/509>`__ (`Tuoris <https://github.com/Tuoris>`__) `#514 <https://github.com/schematics/schematics/pull/514>`__ (`tommyzli <https://github.com/tommyzli>`__) `#518 <https://github.com/schematics/schematics/pull/518>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#546 <https://github.com/schematics/schematics/pull/546>`__ (`harveyslash <https://github.com/harveyslash>`__) - Fix Model.init validation when partial is True `#531 <https://github.com/schematics/schematics/issues/531>`__ (`lkraider <https://github.com/lkraider>`__) - Minor number types refactor and mocking fixes `#519 <https://github.com/schematics/schematics/pull/519>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) `#520 <https://github.com/schematics/schematics/pull/520>`__ (`rooterkyberian <https://github.com/rooterkyberian>`__) - Add ability to import models as strings `#496 <https://github.com/schematics/schematics/pull/496>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) - Add EnumType `#504 
<https://github.com/schematics/schematics/pull/504>`__ (`ekamil <https://github.com/ekamil>`__) - Dynamic models: Possible memory issues because of _subclasses `#502 <https://github.com/schematics/schematics/pull/502>`__ (`mjrk <https://github.com/mjrk>`__) - Add type hints to constructors of field type classes `#488 <https://github.com/schematics/schematics/pull/488>`__ (`KonishchevDmitry <https://github.com/KonishchevDmitry>`__) - Regression: Do not call field validator if field has not been set `#499 <https://github.com/schematics/schematics/pull/499>`__ (`cmonfort <https://github.com/cmonfort>`__) - Add possibility to translate strings and add initial pt_BR translations `#495 <https://github.com/schematics/schematics/pull/495>`__ (`jaysonsantos <https://github.com/jaysonsantos>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.1 / 2017-05-30 ================== - Support for raising DataError inside custom validate_fieldname methods. `#441 <https://github.com/schematics/schematics/pull/441>`__ (`alexhayes <https://github.com/alexhayes>`__) - Add specialized SchematicsDeprecationWarning. (`lkraider <https://github.com/lkraider>`__) - DateTimeType to_native method should handle type errors gracefully. `#491 <https://github.com/schematics/schematics/pull/491>`__ (`e271828- <https://github.com/e271828->`__) - Allow fields names to override the mapping-interface methods. `#489 <https://github.com/schematics/schematics/pull/489>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) (`lkraider <https://github.com/lkraider>`__) 2.0.0 / 2017-05-22 ================== **[BREAKING CHANGE]** Version 2.0 introduces many API changes, and it is not fully backwards-compatible with 1.x code. 
`Full Changelog <https://github.com/schematics/schematics/compare/v1.1.2...v2.0.0>`_ - Add syntax highlighting to README examples `#486 <https://github.com/schematics/schematics/pull/486>`__ (`gabisurita <https://github.com/gabisurita>`__) - Encode Unsafe data state in Model `#484 <https://github.com/schematics/schematics/pull/484>`__ (`lkraider <https://github.com/lkraider>`__) - Add MACAddressType `#482 <https://github.com/schematics/schematics/pull/482>`__ (`aleksej-paschenko <https://github.com/aleksej-paschenko>`__) 2.0.0.b1 / 2017-04-06 ===================== - Enhancing and addressing some issues around exceptions: `#477 <https://github.com/schematics/schematics/pull/477>`__ (`toumorokoshi <https://github.com/toumorokoshi>`__) - Allow primitive and native types to be inspected `#431 <https://github.com/schematics/schematics/pull/431>`__ (`chadrik <https://github.com/chadrik>`__) - Atoms iterator performance improvement `#476 <https://github.com/schematics/schematics/pull/476>`__ (`vovanbo <https://github.com/vovanbo>`__) - Fixes 453: Recursive import\_loop with ListType `#475 <https://github.com/schematics/schematics/pull/475>`__ (`lkraider <https://github.com/lkraider>`__) - Schema API `#466 <https://github.com/schematics/schematics/pull/466>`__ (`lkraider <https://github.com/lkraider>`__) - Tweak code example to avoid sql injection `#462 <https://github.com/schematics/schematics/pull/462>`__ (`Ian-Foote <https://github.com/Ian-Foote>`__) - Convert readthedocs links for their .org -> .io migration for hosted projects `#454 <https://github.com/schematics/schematics/pull/454>`__ (`adamchainz <https://github.com/adamchainz>`__) - Support all non-string Iterables as choices (dev branch) `#436 <https://github.com/schematics/schematics/pull/436>`__ (`di <https://github.com/di>`__) - When testing if a values is None or Undefined, use 'is'. 
`#425 <https://github.com/schematics/schematics/pull/425>`__ (`chadrik <https://github.com/chadrik>`__) 2.0.0a1 / 2016-05-03 ==================== - Restore v1 to\_native behavior; simplify converter code `#412 <https://github.com/schematics/schematics/pull/412>`__ (`bintoro <https://github.com/bintoro>`__) - Change conversion rules for booleans `#407 <https://github.com/schematics/schematics/pull/407>`__ (`bintoro <https://github.com/bintoro>`__) - Test for Model.\_\_init\_\_ context passing to types `#399 <https://github.com/schematics/schematics/pull/399>`__ (`sheilatron <https://github.com/sheilatron>`__) - Code normalization for Python 3 + general cleanup `#391 <https://github.com/schematics/schematics/pull/391>`__ (`bintoro <https://github.com/bintoro>`__) - Add support for arbitrary field metadata. `#390 <https://github.com/schematics/schematics/pull/390>`__ (`chadrik <https://github.com/chadrik>`__) - Introduce MixedType `#380 <https://github.com/schematics/schematics/pull/380>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev2 / 2016-02-06 ======================= - Type maintenance `#383 <https://github.com/schematics/schematics/pull/383>`__ (`bintoro <https://github.com/bintoro>`__) 2.0.0.dev1 / 2016-02-01 ======================= - Performance optimizations `#378 <https://github.com/schematics/schematics/pull/378>`__ (`bintoro <https://github.com/bintoro>`__) - Validation refactoring + exception redesign `#374 <https://github.com/schematics/schematics/pull/374>`__ (`bintoro <https://github.com/bintoro>`__) - Fix typo: serilaizataion --> serialization `#373 <https://github.com/schematics/schematics/pull/373>`__ (`jeffwidman <https://github.com/jeffwidman>`__) - Add support for undefined values `#372 <https://github.com/schematics/schematics/pull/372>`__ (`bintoro <https://github.com/bintoro>`__) - Serializable improvements `#371 <https://github.com/schematics/schematics/pull/371>`__ (`bintoro <https://github.com/bintoro>`__) - Unify import/export 
interface across all types `#368 <https://github.com/schematics/schematics/pull/368>`__ (`bintoro <https://github.com/bintoro>`__) - Correctly decode bytestrings in Python 3 `#365 <https://github.com/schematics/schematics/pull/365>`__ (`bintoro <https://github.com/bintoro>`__) - Fix NumberType.to\_native() `#364 <https://github.com/schematics/schematics/pull/364>`__ (`bintoro <https://github.com/bintoro>`__) - Make sure field.validate() uses a native type `#363 <https://github.com/schematics/schematics/pull/363>`__ (`bintoro <https://github.com/bintoro>`__) - Don't validate ListType items twice `#362 <https://github.com/schematics/schematics/pull/362>`__ (`bintoro <https://github.com/bintoro>`__) - Collect field validators as bound methods `#361 <https://github.com/schematics/schematics/pull/361>`__ (`bintoro <https://github.com/bintoro>`__) - Propagate environment during recursive import/export/validation `#359 <https://github.com/schematics/schematics/pull/359>`__ (`bintoro <https://github.com/bintoro>`__) - DateTimeType & TimestampType major rewrite `#358 <https://github.com/schematics/schematics/pull/358>`__ (`bintoro <https://github.com/bintoro>`__) - Always export empty compound objects as {} / [] `#351 <https://github.com/schematics/schematics/pull/351>`__ (`bintoro <https://github.com/bintoro>`__) - export\_loop cleanup `#350 <https://github.com/schematics/schematics/pull/350>`__ (`bintoro <https://github.com/bintoro>`__) - Fix FieldDescriptor.\_\_delete\_\_ to not touch model `#349 <https://github.com/schematics/schematics/pull/349>`__ (`bintoro <https://github.com/bintoro>`__) - Add validation method for latitude and longitude ranges in GeoPointType `#347 <https://github.com/schematics/schematics/pull/347>`__ (`wraziens <https://github.com/wraziens>`__) - Fix longitude values for GeoPointType mock and add tests `#344 <https://github.com/schematics/schematics/pull/344>`__ (`wraziens <https://github.com/wraziens>`__) - Add support for self-referential 
ModelType fields `#335 <https://github.com/schematics/schematics/pull/335>`__ (`bintoro <https://github.com/bintoro>`__) - avoid unnecessary code path through try/except `#327 <https://github.com/schematics/schematics/pull/327>`__ (`scavpy <https://github.com/scavpy>`__) - Get mock object for ModelType and ListType `#306 <https://github.com/schematics/schematics/pull/306>`__ (`kaiix <https://github.com/kaiix>`__) 1.1.3 / 2017-06-27 ================== * [Maintenance] (`#501 <https://github.com/schematics/schematics/issues/501>`_) Dynamic models: Possible memory issues because of _subclasses 1.1.2 / 2017-03-27 ================== * [Bug] (`#478 <https://github.com/schematics/schematics/pull/478>`_) Fix dangerous performance issue with ModelConversionError in nested models 1.1.1 / 2015-11-03 ================== * [Bug] (`befa202 <https://github.com/schematics/schematics/commit/befa202c3b3202aca89fb7ef985bdca06f9da37c>`_) Fix Unicode issue with DecimalType * [Documentation] (`41157a1 <https://github.com/schematics/schematics/commit/41157a13896bd32a337c5503c04c5e9cc30ba4c7>`_) Documentation overhaul * [Bug] (`860d717 <https://github.com/schematics/schematics/commit/860d71778421981f284c0612aec665ebf0cfcba2>`_) Fix import that was negatively affecting performance * [Feature] (`93b554f <https://github.com/schematics/schematics/commit/93b554fd6a4e7b38133c4da5592b1843101792f0>`_) Add DataObject to datastructures.py * [Bug] (`#236 <https://github.com/schematics/schematics/pull/236>`_) Set `None` on a field that's a compound type should honour that semantics * [Maintenance] (`#348 <https://github.com/schematics/schematics/pull/348>`_) Update requirements * [Maintenance] (`#346 <https://github.com/schematics/schematics/pull/346>`_) Combining Requirements * [Maintenance] (`#342 <https://github.com/schematics/schematics/pull/342>`_) Remove to_primitive() method from compound types * [Bug] (`#339 <https://github.com/schematics/schematics/pull/339>`_) Basic number validation * [Bug] 
(`#336 <https://github.com/schematics/schematics/pull/336>`_) Don't evaluate serializable when accessed through class * [Bug] (`#321 <https://github.com/schematics/schematics/pull/321>`_) Do not compile regex * [Maintenance] (`#319 <https://github.com/schematics/schematics/pull/319>`_) Remove mock from install_requires 1.1.0 / 2015-07-12 ================== * [Feature] (`#303 <https://github.com/schematics/schematics/pull/303>`_) fix ListType, validate_items adds to errors list just field name without... * [Feature] (`#304 <https://github.com/schematics/schematics/pull/304>`_) Include Partial Data when Raising ModelConversionError * [Feature] (`#305 <https://github.com/schematics/schematics/pull/305>`_) Updated domain verifications to fit to RFC/working standards * [Feature] (`#308 <https://github.com/schematics/schematics/pull/308>`_) Grennady ordered validation * [Feature] (`#309 <https://github.com/schematics/schematics/pull/309>`_) improves date_time_type error message for custom formats * [Feature] (`#310 <https://github.com/schematics/schematics/pull/310>`_) accept optional 'Z' suffix for UTC date_time_type format * [Feature] (`#311 <https://github.com/schematics/schematics/pull/311>`_) Remove commented lines from models.py * [Feature] (`#230 <https://github.com/schematics/schematics/pull/230>`_) Message normalization 1.0.4 / 2015-04-13 ================== * [Example] (`#286 <https://github.com/schematics/schematics/pull/286>`_) Add schematics usage with Django * [Feature] (`#292 <https://github.com/schematics/schematics/pull/292>`_) increase domain length to 10 for .holiday, .vacations * [Feature] (`#297 <https://github.com/schematics/schematics/pull/297>`_) Support for fields order in serialized format * [Feature] (`#300 <https://github.com/schematics/schematics/pull/300>`_) increase domain length to 32 1.0.3 / 2015-03-07 ================== * [Feature] (`#284 <https://github.com/schematics/schematics/pull/284>`_) Add missing requirement for `six` * [Feature] 
(`#283 <https://github.com/schematics/schematics/pull/283>`_) Update error msgs to print out invalid values in base.py * [Feature] (`#281 <https://github.com/schematics/schematics/pull/281>`_) Update Model.__eq__ * [Feature] (`#267 <https://github.com/schematics/schematics/pull/267>`_) Type choices should be list or tuple 1.0.2 / 2015-02-12 ================== * [Bug] (`#280 <https://github.com/schematics/schematics/issues/280>`_) Fix the circular import issue. 1.0.1 / 2015-02-01 ================== * [Feature] (`#184 <https://github.com/schematics/schematics/issues/184>`_ / `03b2fd9 <https://github.com/schematics/schematics/commit/03b2fd97fb47c00e8d667cc8ea7254cc64d0f0a0>`_) Support for polymorphic model fields * [Bug] (`#233 <https://github.com/schematics/schematics/pull/233>`_) Set field.owner_model recursively and honor ListType.field.serialize_when_none * [Bug](`#252 <https://github.com/schematics/schematics/pull/252>`_) Fixed project URL * [Feature] (`#259 <https://github.com/schematics/schematics/pull/259>`_) Give export loop to serializable when type has one * [Feature] (`#262 <https://github.com/schematics/schematics/pull/262>`_) Make copies of inherited meta attributes when setting up a Model * [Documentation] (`#276 <https://github.com/schematics/schematics/pull/276>`_) Improve the documentation of get_mock_object 1.0.0 / 2014-10-16 ================== * [Documentation] (`#239 <https://github.com/schematics/schematics/issues/239>`_) Fix typo with wording suggestion * [Documentation] (`#244 <https://github.com/schematics/schematics/issues/244>`_) fix wrong reference in docs * [Documentation] (`#246 <https://github.com/schematics/schematics/issues/246>`_) Using the correct function name in the docstring * [Documentation] (`#245 <https://github.com/schematics/schematics/issues/245>`_) Making the docstring match actual parameter names * [Feature] (`#241 <https://github.com/schematics/schematics/issues/241>`_) Py3k support 0.9.5 / 2014-07-19 ================== * 
[Feature] (`#191 <https://github.com/schematics/schematics/pull/191>`_) Updated import_data to avoid overwriting existing data. deserialize_mapping can now support partial and nested models. * [Documentation] (`#192 <https://github.com/schematics/schematics/pull/192>`_) Document the creation of custom types * [Feature] (`#193 <https://github.com/schematics/schematics/pull/193>`_) Add primitive types accepting values of any simple or compound primitive JSON type. * [Bug] (`#194 <https://github.com/schematics/schematics/pull/194>`_) Change standard coerce_key function to unicode * [Tests] (`#196 <https://github.com/schematics/schematics/pull/196>`_) Test fixes and cleanup * [Feature] (`#197 <https://github.com/schematics/schematics/pull/197>`_) Giving context to serialization * [Bug] (`#198 <https://github.com/schematics/schematics/pull/198>`_) Fixed typo in variable name in DateTimeType * [Feature] (`#200 <https://github.com/schematics/schematics/pull/200>`_) Added the option to turn of strict conversion when creating a Model from a dict * [Feature] (`#212 <https://github.com/schematics/schematics/pull/212>`_) Support exporting ModelType fields with subclassed model instances * [Feature] (`#214 <https://github.com/schematics/schematics/pull/214>`_) Create mock objects using a class's fields as a template * [Bug] (`#215 <https://github.com/schematics/schematics/pull/215>`_) PEP 8 FTW * [Feature] (`#216 <https://github.com/schematics/schematics/pull/216>`_) Datastructures cleanup * [Feature] (`#217 <https://github.com/schematics/schematics/pull/217>`_) Models cleanup pt 1 * [Feature] (`#218 <https://github.com/schematics/schematics/pull/218>`_) Models cleanup pt 2 * [Feature] (`#219 <https://github.com/schematics/schematics/pull/219>`_) Mongo cleanup * [Feature] (`#220 <https://github.com/schematics/schematics/pull/220>`_) Temporal cleanup * [Feature] (`#221 <https://github.com/schematics/schematics/pull/221>`_) Base cleanup * [Feature] (`#224 
<https://github.com/schematics/schematics/pull/224>`_) Exceptions cleanup * [Feature] (`#225 <https://github.com/schematics/schematics/pull/225>`_) Validate cleanup * [Feature] (`#226 <https://github.com/schematics/schematics/pull/226>`_) Serializable cleanup * [Feature] (`#227 <https://github.com/schematics/schematics/pull/227>`_) Transforms cleanup * [Feature] (`#228 <https://github.com/schematics/schematics/pull/228>`_) Compound cleanup * [Feature] (`#229 <https://github.com/schematics/schematics/pull/229>`_) UUID cleanup * [Feature] (`#231 <https://github.com/schematics/schematics/pull/231>`_) Booleans as numbers 0.9.4 / 2013-12-08 ================== * [Feature] (`#178 <https://github.com/schematics/schematics/pull/178>`_) Added deserialize_from flag to BaseType for alternate field names on import * [Bug] (`#186 <https://github.com/schematics/schematics/pull/186>`_) Compoundtype support in ListTypes * [Bug] (`#181 <https://github.com/schematics/schematics/pull/181>`_) Removed that stupid print statement! * [Feature] (`#182 <https://github.com/schematics/schematics/pull/182>`_) Default roles system * [Documentation] (`#190 <https://github.com/schematics/schematics/pull/190>`_) Typos * [Bug] (`#177 <https://github.com/schematics/schematics/pull/177>`_) Removed `__iter__` from ModelMeta * [Documentation] (`#188 <https://github.com/schematics/schematics/pull/188>`_) Typos 0.9.3 / 2013-10-20 ================== * [Documentation] More improvements * [Feature] (`#147 <https://github.com/schematics/schematics/pull/147>`_) Complete conversion over to py.test * [Bug] (`#176 <https://github.com/schematics/schematics/pull/176>`_) Fixed bug preventing clean override of options class * [Bug] (`#174 <https://github.com/schematics/schematics/pull/174>`_) Python 2.6 support 0.9.2 / 2013-09-13 ================== * [Documentation] New History file! 
* [Documentation] Major improvements to documentation * [Feature] Renamed ``check_value`` to ``validate_range`` * [Feature] Changed ``serialize`` to ``to_native`` * [Bug] (`#155 <https://github.com/schematics/schematics/pull/155>`_) NumberType number range validation bugfix
/schemv-2.1.1.1.tar.gz/schemv-2.1.1.1/HISTORY.rst
0.787278
0.657799
HISTORY.rst
pypi
:github_url: .. _about: .. index:: single: synopsis single: motivation single: implementation single: use cases About ``scherbelberg`` ====================== .. _synopsis: Synopsis -------- *scherbelberg* provides both a :ref:`command line interface (CLI) <cli>` and a :ref:`Python application programming interface (API) <api>` for deploying and managing small `Dask`_-based HPC `clusters`_ in the `Hetzner cloud`_. .. warning:: Development status alpha, stability acceptable, :ref:`security <security>` also acceptable but implementation needs a review. .. _Hetzner cloud: https://www.hetzner.com/cloud .. _Dask: https://dask.org/ .. _clusters: https://en.wikipedia.org/wiki/Computer_cluster .. _projectname: Project's Name -------------- Next to impressive projects like `Fugaku`_, which is named after `Mount Fuji`_, the `TOP500`_ are clearly missing an entry from the city of `Leipzig`_. This project is named after one of the few significant "mountains" in the city, the "Scherbelberg", also known as the "`Rosentalhügel`_": 20 meters above the surrounding landscape and 125 meters above sea level. Starting out as a late 19th century landfill, it has since become part of a park-like landscape. As of 1975, a famously shaky steel `observation tower`_ with a rather `beautiful view`_ is located at its top, overlooking not only the `Leipziger Auenwald`_ forest but also the city's sewage treatment plant. .. _Fugaku: https://en.wikipedia.org/wiki/Fugaku_(supercomputer) .. _Mount Fuji: https://en.wikipedia.org/wiki/Mount_Fuji .. _TOP500: https://en.wikipedia.org/wiki/TOP500 .. _Leipzig: https://en.wikipedia.org/wiki/Leipzig .. _Rosentalhügel: https://commons.wikimedia.org/wiki/Category:Rosentalh%C3%BCgel_(Leipzig) .. _observation tower: https://commons.wikimedia.org/wiki/Category:Rosentalturm .. _beautiful view: https://commons.wikimedia.org/wiki/Category:Views_from_Rosentalturm .. _Leipziger Auenwald: https://en.wikipedia.org/wiki/Leipzig_Riverside_Forest .. 
_motivation: Motivation ---------- While Dask is wonderful for automating large, parallel, distributed computations, it can not solve the problem of its own deployment onto computer clusters. Instead, Dask plays nicely with established tools in the arena such as `slurm`_. Deploying Dask onto a custom cluster therefore requires a fair bit of time, background knowledge and technical skills in computer & network administration. One of the really appealing features of Dask is that it enables users to exploit huge quantities of cloud compute resources really efficiently. Cloud compute instances can usually be rented on a per-hour basis, making them an interesting target for sizable, short-lived, on-demand clusters. For cloud deployments like this, there is the Dask-related `cloud provider package`_, which surprisingly does not solve the entire problem of deployment. At the time of *scherbelberg*'s creation, it was both rather inflexible and lacking support for the Hetzner cloud. Companies like `Coiled`_, which is also the primary developer of Dask, have filled this niche with polished, web-front-end services (and equally polished APIs) for creating clusters on clouds, which effectively makes them resellers of cloud resources. *scherbelberg* aims at eliminating the resellers from the equation while trying to provide a minimal, independent, self-contained, yet fully operational solution. .. note:: The idea is to be able to build tools quickly and easily on top of *scherbelberg*. The `nahleberg`_ project aims at using *scherbelberg* to load computations from within `QGIS`_ off to on-demand Dask cluster from within QGIS' GUI - without the user having to write a single line of code. .. _cloud provider package: https://cloudprovider.dask.org/en/latest/ .. _slurm: https://slurm.schedmd.com/documentation.html .. _Coiled: https://coiled.io/ .. _nahleberg: https://github.com/pleiszenburg/nahleberg .. _QGIS: https://www.qgis.org/ .. 
_implementation: Implementation -------------- *scherbelberg* creates clusters on the Hetzner cloud quickly and completely from scratch without any prerequisites on the cloud's side. No pre-configured operating system, virtual machine or docker images are required. *scherbelberg* simply connects to the Hetzner cloud via its `REST API`_, creates the required number and kind of compute instances based on the latest `Ubuntu LTS`_ release, networks as well as secures the compute instances and deploys `mambaforge`_ onto them. Depending on the size of the cluster, creating an entire cluster from scratch with a single command or single API call requires anywhere from two to ten minutes. Destroying a cluster is done in under ten seconds. In many ways, *scherbelberg* is a quick and dirty bare-bones solution. It heavily relies on ``ssh`` and the systems' shell. It does not use any higher-end tools for mass-administration of computers such as `Chef`_ or `Ansible`_. .. note:: *scherbelberg*'s primary objective is to provide a stack of Dask, `conda-forge`_ and Ubuntu as simply and cleanly as possible. .. note:: *scherbelberg* is designed as an **asynchronous** package using `asyncio`_. .. _mambaforge: https://github.com/conda-forge/miniforge#mambaforge .. _REST API: https://docs.hetzner.cloud/ .. _Ubuntu LTS: https://ubuntu.com/blog/what-is-an-ubuntu-lts-release .. _Chef: https://www.chef.io/ .. _Ansible: https://www.ansible.com/ .. _conda-forge: https://conda-forge.org/ .. _asyncio: https://docs.python.org/3/library/asyncio.html .. _usecases: Use Cases --------- Anything that Dask can be used for: Parallel & distributed computations, distributed memory, all on demand. In many cases, high-performant computational resources are only needed for short periods of time. In those cases, it might not be worth spending a lot of money on matching hardware. Being able to quickly create and destroy custom-configured computer clusters enables a different, very interesting kind of thinking.
/scherbelberg-0.0.6.tar.gz/scherbelberg-0.0.6/docs/source/about.rst
0.939803
0.671928
about.rst
pypi
import json
from datetime import datetime

from pydantic import BaseModel

from scherkhan.models.enums import WebsocketMessageType


class BaseMessage(BaseModel):
    """Base message model."""

    type: WebsocketMessageType
    car_id: int
    data: dict


class CmdMessageData(BaseModel):
    """Cmd message data model."""

    cmd: str
    hash: int
    data: dict = None
    state: str


class CmdMessage(BaseMessage):
    """Cmd message model."""

    type: WebsocketMessageType = WebsocketMessageType.cmd
    data: CmdMessageData


class RouteMessageData(BaseModel):
    """Route message data model."""

    type: int
    dt_start: datetime
    distance: int
    points: str
    coord_src: int
    is_abs: bool


class RouteMessage(BaseMessage):
    """Route message model."""

    type: WebsocketMessageType = WebsocketMessageType.route
    data: RouteMessageData


class BatteryVoltage(BaseModel):
    """Battery voltage model."""

    value: float
    percent: int


class EngineReason(BaseModel):
    """Engine reason model."""

    id: int
    value: str


class Engine(BaseModel):
    """Engine model."""

    state_id: int
    remote_start_time: datetime = None
    rpm: int = None
    temperature: int
    is_running: bool
    reason: dict


class StateMessageData(BaseModel):
    """State message data model."""

    cabin_temperature: int = None
    coolant_temperature: int = None
    battery_voltage: BatteryVoltage
    speed: int = None
    odometer: int
    fuel: str = None
    gsm_rssi: int
    lat: float
    lng: float
    coord_src: int
    course: str = None
    is_online: bool
    data_lag: int
    last_connected: datetime
    msg: str = None
    satellites_number: int = None
    is_in_evacuation: bool = None
    is_mark_near: bool = None
    is_mark_immobilizer: bool = None
    is_in_route: bool
    is_ignition_on: bool
    is_pre_heater_on: bool
    is_lights_on: bool
    is_l_turn_signal: bool
    is_r_turn_signal: bool
    is_trunk_open: bool
    is_hood_open: bool
    is_driver_door_closed: bool
    is_brake_pressed: bool
    is_fl_door_open: bool
    is_fr_door_open: bool
    is_rl_door_open: bool
    is_rr_door_open: bool
    is_locked: bool
    state: str
    sim_balance: float
    sim_balance_updated: datetime
    sim_currency: str
    is_settings_synchronized: bool
    fines: int
    engine: Engine


class StateMessage(BaseMessage):
    """State message model."""

    type: WebsocketMessageType = WebsocketMessageType.state
    data: StateMessageData


def parse_json_message(json_data: str):
    """Parse a raw websocket JSON payload into a typed message model.

    Dispatches on the payload's "type" field and returns a StateMessage,
    RouteMessage or CmdMessage accordingly. Returns None for unknown
    message types.
    """
    message = json.loads(json_data)
    if message["type"] == WebsocketMessageType.state.value:
        return StateMessage(**message)
    if message["type"] == WebsocketMessageType.route.value:
        return RouteMessage(**message)
    if message["type"] == WebsocketMessageType.cmd.value:
        # Bug fix: the original code duplicated the route branch here
        # (dead code), so cmd messages were never parsed into CmdMessage.
        return CmdMessage(**message)
    return None
/scherkhan_auto-0.1.0.tar.gz/scherkhan_auto-0.1.0/src/scherkhan/models/websocket.py
0.667473
0.29116
websocket.py
pypi
import time

import numpy as np
from schicluster import *
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics.cluster import adjusted_rand_score as ARI

# Chromosome lengths in bp: mm9 (chr1-chr19) and hg19 (chr1-chr22 plus chrX).
mm9dim = [197195432, 181748087, 159599783, 155630120, 152537259, 149517037, 152524553, 131738871, 124076172, 129993255, 121843856, 121257530, 120284312, 125194864, 103494974, 98319150, 95272651, 90772031, 61342430]
hg19dim = [249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663, 146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566, 155270560]

# File list and labels of dataset Ramani 2017
# Bug fix: `dataset` was referenced in the np.save() path below but never
# defined, which raised a NameError when that line was reached.
dataset = 'Ramani2017'
ctlist = ['HeLa', 'HAP1', 'GM12878', 'K562']
# Bug fix: np.str was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin str is the documented replacement.
network = [np.loadtxt('1mb_resolution/' + ct + '/sample_list.txt', dtype=str) for ct in ctlist]
label = np.array([ctlist[i] for i in range(len(ctlist)) for j in range(len(network[i]))]).astype('U8')
network = np.concatenate(network)
chrom = [str(i + 1) for i in range(22)] + ['X']
chromsize = {chrom[i]: hg19dim[i] for i in range(len(chrom))}
nc = 4  # number of cell types / clusters

# CpG content for each bin: fraction of CpG bases per 1 Mb bin, grouped by chromosome.
cg = np.loadtxt('hg19/bin/hg19.1mb.bin.CpG.txt', dtype=str, skiprows=1, usecols=(0, 9, 11, 12))
cgdata = cg[:, 1:].astype(float)
cgdata = cgdata[:, 2] / (cgdata[:, 1] - cgdata[:, 0])
cgdata[np.isnan(cgdata)] = 0.0
chrcg = {c: cgdata[cg[:, 0] == 'chr' + c] for c in chrom}

# scHiCluster GPU
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# scHiCluster CPU
start_time = time.time()
cluster, embedding = hicluster_cpu(network, chromsize, nc=nc, ncpus=5)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# PCA
start_time = time.time()
cluster, embedding = raw_pca(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]

# Downsample reads to uniform the coverage of all the cells before PCA
start_time = time.time()
cluster, embedding = ds_pca(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# Use compartment score (PC1) of single cells
start_time = time.time()
cluster, embedding = compartment(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# Use contact-distance decay curve
start_time = time.time()
cluster, embedding = decay(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# Ablations: disable individual scHiCluster stages to measure their contribution.

# scHiCluster without linear convolution
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, pad=0)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# scHiCluster without random walk
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, rp=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]

# scHiCluster without keeping the top elements
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, prct=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
np.save('/cellar/users/zhoujt1994/projects/scHiC/' + dataset + '/embedding/1mb_pad1_rwr_real.npy', embedding)

# Random walk only
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, pad=0, prct=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]

# Linear convolution only
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, rp=-1, prct=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
/schicluster-1.3.2.tar.gz/schicluster-1.3.2/example/example.py
0.466846
0.317334
example.py
pypi
# SchNetPack - Deep Neural Networks for Atomistic Systems [![Build Status](https://travis-ci.com/atomistic-machine-learning/schnetpack.svg?branch=master)](https://travis-ci.com/atomistic-machine-learning/schnetpack) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black) [![](https://shields.io/badge/-Lightning--Hydra--Template-017F2F?style=flat&logo=github&labelColor=303030)](https://github.com/hobogalaxy/lightning-hydra-template) SchNetPack is a toolbox for the development and application of deep neural networks to the prediction of potential energy surfaces and other quantum-chemical properties of molecules and materials. It contains basic building blocks of atomistic neural networks, manages their training and provides simple access to common benchmark datasets. This allows for an easy implementation and evaluation of new models. ##### Features - SchNet - an end-to-end continuous-filter CNN for molecules and materials [1-3] - PaiNN - equivariant message-passing for molecules and materials [4] - Output modules for dipole moments, polarizability, stress, and general response properties - Modules for electrostatics, Ewald summation, ZBL repulsion - GPU-accelerated molecular dynamics code incl. path-integral MD, thermostats, barostats ##### Requirements: - python 3.8 - Atomic Simulation Environment (ASE) 3.21 - NumPy - PyTorch 1.9 - PyTorch Lightning 1.9.0 - Hydra 1.1 _**Note: We recommend using a GPU for training the neural networks.**_ ## Installation ### Install with pip The simplest way to install SchNetPack is through pip which will automatically get the source code from PyPI: ``` pip install schnetpack ``` ### Install from source You can also install the most recent code from our repository: ``` git clone https://github.com/atomistic-machine-learning/schnetpack.git cd schnetpack pip install . ``` ### Visualization with Tensorboard SchNetPack supports multiple logging backends over PyTorch Lightning. 
The default logger is Tensorboard, which can be installed via: ``` pip install tensorboard ``` ## Getting started The best place to get started is training a SchNetPack model on a common benchmark dataset via the command line interface (CLI). When installing SchNetPack, the training script `spktrain` is added to your PATH. The CLI is based on [Hydra](https://hydra.cc/) and oriented on the PyTorch Lightning/Hydra template that can be found [here](https://github.com/ashleve/lightning-hydra-template). This enables a flexible configuration of the model, data and training process. To fully take advantage of these features, it might be helpful to have a look at the Hydra and PyTorch Lightning docs. ### Example 1: QM9 In the following, we focus on using the CLI to train on the QM9 dataset, but the same procedure applies for the other benchmark datasets as well. First, create a working directory, where all data and runs will be stored: ``` mkdir spk_workdir cd spk_workdir ``` Then, the training of a SchNet model with default settings for QM9 can be started by: ``` spktrain experiment=qm9_atomwise ``` The script prints the defaults for the experiment config `qm9_atomwise`. The dataset will be downloaded automatically to `spk_workdir/data`, if it does not exist yet. Then, the training will be started. All values of the config can be changed from the command line, including the directories for run and data. By default, the model is stored in a directory with a unique run id hash as a subdirectory of `spk_workdir/runs`. This can be changed as follows: ``` spktrain experiment=qm9 run.data_dir=/my/data/dir run.path=~/all_my_runs run.id=this_run ``` If you call `spktrain experiment=qm9 --help`, you can see the full config with all the parameters that can be changed. 
Nested parameters can be changed as follows: ``` spktrain experiment=qm9_atomwise data_dir=<path> data.batch_size=64 ``` Hydra organizes parameters in config groups which allows hierarchical configurations consisting of multiple yaml files. This allows to easily change the whole dataset, model or representation. For instance, changing from the default SchNet representation to PaiNN, use: ``` spktrain experiment=qm9_atomwise data_dir=<path> model/representation=painn ``` It is a bit confusing at first when to use "." or "/". The slash is used, if you are loading a preconfigured config group, while the dot is used changing individual values. For example, the config group "model/representation" corresponds to the following part of the config: ``` model: representation: _target_: schnetpack.representation.PaiNN n_atom_basis: 128 n_interactions: 3 shared_interactions: false shared_filters: false radial_basis: _target_: schnetpack.nn.radial.GaussianRBF n_rbf: 20 cutoff: ${globals.cutoff} cutoff_fn: _target_: schnetpack.nn.cutoff.CosineCutoff cutoff: ${globals.cutoff} ``` If you would want to additionally change some value of this group, you could use: ``` spktrain experiment=qm9_atomwise data_dir=<path> model/representation=painn model.representation.n_interactions=5 ``` For more details on config groups, have a look at the [Hydra docs](https://hydra.cc/docs/next/tutorials/basic/your_first_app/config_groups). ### Example 2: Potential energy surfaces The example above uses `AtomisticModel` internally, which is a `pytorch_lightning.LightningModule`, to predict single properties. The following example will use the same class to predict potential energy surfaces, in particular energies with the appropriate derivates to obtain forces and stress tensors. This works since the pre-defined configuration for the MD17 dataset, provided from the command line by `experiment=md17`, is selecting the representation and output modules that `AtomisticModel` is using. 
A more detailed description of the configuration and how to build your custom configs can be found [here](https://schnetpack.readthedocs.io/en/latest/userguide/configs.html). The `spktrain` script can be used to train a model for a molecule from the MD17 datasets ``` spktrain experiment=md17 data.molecule=uracil ``` In the case of MD17, reference calculations of energies and forces are available. Therefore, one needs to set weights for the losses of those properties. The losses are defined as part of output definitions in the `task` config group: ``` task: outputs: - _target_: schnetpack.task.ModelOutput name: ${globals.energy_key} loss_fn: _target_: torch.nn.MSELoss metrics: mae: _target_: torchmetrics.regression.MeanAbsoluteError mse: _target_: torchmetrics.regression.MeanSquaredError loss_weight: 0.005 - _target_: schnetpack.task.ModelOutput name: ${globals.forces_key} loss_fn: _target_: torch.nn.MSELoss metrics: mae: _target_: torchmetrics.regression.MeanAbsoluteError mse: _target_: torchmetrics.regression.MeanSquaredError loss_weight: 0.995 ``` For a training on *energies* and *forces*, we recommend putting a stronger weight on the loss of the force prediction during training. By default, the loss weights are set to 0.005 for the energy and 0.995 for forces. This can be changed as follows: ``` spktrain experiment=md17 data.molecule=uracil task.outputs.0.loss_weight=0.005 task.outputs.1.loss_weight=0.995 ``` ### Logging Beyond the output of the command line, SchNetPack supports multiple logging backends over PyTorch Lightning. By default, the Tensorboard logger is activated. If TensorBoard is installed, the results can be shown by calling: ``` tensorboard --logdir=<rundir> ``` Furthermore, SchNetPack comes with configs for a CSV logger and [Aim](https://github.com/aimhubio/aim). These can be selected as follows: ``` spktrain experiment=md17 logger=csv ``` ## LAMMPS interface SchNetPack comes with an interface to LAMMPS.
A detailed installation guide is linked in the [How-To section of our documentation](https://schnetpack.readthedocs.io/en/latest/howtos/lammps.html). ## Extensions SchNetPack can be used as a base for implementations of advanced atomistic neural networks and training tasks. For example, there exists an [extension package](https://github.com/atomistic-machine-learning/schnetpack-gschnet) called `schnetpack-gschnet` for the most recent version of cG-SchNet [5], a conditional generative model for molecules. It demonstrates how a complex training task can be implemented in a few custom classes while leveraging the hierarchical configuration and automated training procedure of the SchNetPack framework. ## Documentation For the full API reference, visit our [documentation](https://schnetpack.readthedocs.io). If you are using SchNetPack in your research, please cite: K.T. Schütt, P. Kessel, M. Gastegger, K. Nicoli, A. Tkatchenko, K.-R. Müller. SchNetPack: A Deep Learning Toolbox For Atomistic Systems. J. Chem. Theory Comput. [10.1021/acs.jctc.8b00908](http://dx.doi.org/10.1021/acs.jctc.8b00908) [arXiv:1809.01072](https://arxiv.org/abs/1809.01072). (2018) K. T. Schütt, S. S. P. Hessmann, N. W. A. Gebauer, J. Lederer, M. Gastegger. SchNetPack 2.0: A neural network toolbox for atomistic machine learning. [arXiv:2212.05517](https://arxiv.org/abs/2212.05517). (2022) ## Acknowledgements CLI and hydra configs for PyTorch Lightning are adapted from this template: [![](https://shields.io/badge/-Lightning--Hydra--Template-017F2F?style=flat&logo=github&labelColor=303030)](https://github.com/hobogalaxy/lightning-hydra-template) ## References * [1] K.T. Schütt. F. Arbabzadah. S. Chmiela, K.-R. Müller, A. Tkatchenko. *Quantum-chemical insights from deep tensor neural networks.* Nature Communications **8**. 13890 (2017) [10.1038/ncomms13890](http://dx.doi.org/10.1038/ncomms13890) * [2] K.T. Schütt. P.-J. Kindermans, H. E. Sauceda, S. Chmiela, A. Tkatchenko, K.-R. Müller. 
*SchNet: A continuous-filter convolutional neural network for modeling quantum interactions.* Advances in Neural Information Processing Systems 30, pp. 992-1002 (2017) [Paper](http://papers.nips.cc/paper/6700-schnet-a-continuous-filter-convolutional-neural-network-for-modeling-quantum-interactions) * [3] K.T. Schütt. P.-J. Kindermans, H. E. Sauceda, S. Chmiela, A. Tkatchenko, K.-R. Müller. *SchNet - a deep learning architecture for molecules and materials.* The Journal of Chemical Physics 148(24), 241722 (2018) [10.1063/1.5019779](https://doi.org/10.1063/1.5019779) * [4] K. T. Schütt, O. T. Unke, M. Gastegger *Equivariant message passing for the prediction of tensorial properties and molecular spectra.* International Conference on Machine Learning (pp. 9377-9388). PMLR, [Paper](https://proceedings.mlr.press/v139/schutt21a.html). * [5] N. W. A. Gebauer, M. Gastegger, S. S. P. Hessmann, K.-R. Müller, K. T. Schütt *Inverse design of 3d molecular structures with conditional generative neural networks.* Nature Communications **13**. 973 (2022) [10.1038/s41467-022-28526-y](https://doi.org/10.1038/s41467-022-28526-y)
/schnetpack-2.0.3.tar.gz/schnetpack-2.0.3/README.md
0.78695
0.990848
README.md
pypi
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

from . import helpers


def get_publication_data(author_id: str, author_name: str = '', preferred_browser: str = "safari") -> list[dict[str, str]]:
    """Retrieves data from Google Scholar.

    This function is the primary web-utility that opens a new selenium window
    to automate visiting Google Scholar and extracting data.

    Args:
        author_id (str): Google Scholar ID of the author information to extract.
        author_name (str, optional): Name of the scholar. Defaults to ''.
        preferred_browser (str, optional): Browser to use to scrape data.
            Must be one of 'chrome', 'firefox', or 'safari'. Defaults to "safari".

    Raises:
        ValueError: If `preferred_browser` is not one of the supported browsers.

    Returns:
        list[dict[str, str]]: A list of publication data, each dictionary
            containing the keys `authors`, `publication_year`,
            `journal_title`, and `title`.
    """
    if preferred_browser == "chrome":
        browser = webdriver.Chrome
    elif preferred_browser == "firefox":
        browser = webdriver.Firefox
    elif preferred_browser == "safari":
        browser = webdriver.Safari
    else:
        raise ValueError("preferred_browser must be one of 'chrome', 'firefox', or 'safari'")

    data: list[dict[str, str]] = []
    profile_link = f"https://scholar.google.com/citations?&hl=en&user={author_id}"
    more_pubs = True
    loop = 0
    while more_pubs:
        print(f"Scraping {author_name.title()} page {loop + 1}")
        time.sleep(5)  # throttle so Google Scholar does not rate-limit/block us
        driver = browser()
        try:
            driver.get(f"{profile_link}&cstart={loop*100}&pagesize=100&view_op=list_works&sortby=pubdate")
            more_pubs = driver.find_element(By.ID, 'gsc_bpf_more').is_enabled()
            if more_pubs:
                loop += 1
            # Collect all hrefs up front: the WebElement handles returned by
            # find_elements become stale as soon as the driver navigates away
            # from the list page, so reusing them across page loads would
            # raise StaleElementReferenceException.
            pub_links = [
                pub.get_attribute("href")
                for pub in driver.find_elements(By.CSS_SELECTOR, "a.gsc_a_at")
            ]
            for pub_info_link in pub_links:
                driver.get(pub_info_link)
                elements = driver.find_elements(By.CLASS_NAME, "gsc_oci_value")
                # TODO: get link as well (class=gsc_oci_title_link)
                title_html = driver.find_element(By.CLASS_NAME, "gsc_oci_title_link")
                if len(elements) > 3:
                    data.append(
                        {
                            "authors": elements[0].text,
                            "publication_year": elements[1].text,
                            "journal_title": elements[2].text,
                            "title": title_html.text,
                        }
                    )
        finally:
            # Close every per-page driver, not just the last one, so browser
            # sessions are not leaked when an error occurs mid-scrape.
            driver.close()
    return data


def scrape_single_author(scholar_id: str, scholar_name: str = '', preferred_browser: str = "safari"):
    """Scrapes data from google scholar and saves into json file.

    Args:
        scholar_id (str): Google Scholar ID of the author information to extract.
        scholar_name (str, optional): Name of the scholar. Defaults to ''.
        preferred_browser (str, optional): Browser to use to scrape data.
            Must be one of 'chrome', 'firefox', or 'safari'. Defaults to "safari".
    """
    pub_data = get_publication_data(scholar_id, scholar_name, preferred_browser)
    helpers.append_pub_data_to_json(pub_data)
    print(f"Wrote {scholar_name if scholar_name else scholar_id} to file.")
/scholar-network-0.2.6.tar.gz/scholar-network-0.2.6/src/scholar_network/scraping.py
0.519034
0.215361
scraping.py
pypi
from collections import defaultdict
from dataclasses import InitVar, dataclass, field
from typing import Union


@dataclass
class CustomCounter:
    """Custom counter class built to emulate Counter from std lib.

    This class utilizes a specific use case for this program, and uses
    'reflexive tuple matching' to get synonymous pairs.

    Parameters:
        lst (list[tuple[str, str]]): A list of author-tuple pairs.
    """

    lst: InitVar[list[tuple[str, str]]]
    counts: dict[tuple[str, str], int] = field(init=False)

    def __post_init__(self, lst: list[tuple[str, str]]):
        """Converts author pairs into counts of unique pairs.

        After initialization, sorts each pair so that 'reflexive tuples' —
        (a, b) and (b, a) — are counted as the same pair, and stores the
        resulting counts in `self.counts`.

        Parameters:
            lst (list[tuple[str, str]]): A list of author-tuple pairs.
        """
        result: dict[tuple[str, str], int] = defaultdict(int)
        for pair in lst:
            # by sorting we can ensure we catch the 'reflexive tuples'
            result[tuple(sorted(pair))] += 1
        self.counts = result

    def most_common(
        self, limit: Union[int, None] = None
    ) -> list[tuple[tuple[str, str], int]]:
        """Sorts and returns ordered tuple pairs.

        Args:
            limit (Union[int, None], optional): Maximum number of pairs to
                return. Defaults to None, which returns all pairs.

        Returns:
            list[tuple[tuple[str, str], int]]: Pairs with their counts,
                ordered from most to least common.
        """
        ordered_keys = sorted(self.counts, key=lambda k: self.counts[k], reverse=True)
        ordered_result = [(key, self.counts[key]) for key in ordered_keys]
        # NOTE: limit=0 is falsy and therefore, like None, returns everything.
        return ordered_result[:limit] if limit else ordered_result


@dataclass(unsafe_hash=True)
class Node:
    """Node is a container to hold authors.

    Attributes:
        name (str): name of the author
    """

    __slots__ = "name"
    name: str

    def __str__(self) -> str:
        """Returns the name as the string.

        Returns:
            str: Name attribute of the node.
        """
        return self.name


@dataclass
class Edge:
    """Edge connects Nodes that co-occur together in the same publication.

    Edges are directed in a DiGraph and undirected in Graph.
    ^^ Both of these cases are handled by the (Di)Graph classes not the Edge
    itself.

    Attributes:
        src (Node): Source node for the connection.
        dest (Node): Destination node for the connection.
    """

    # TODO: add support for journal title as attribute of edge
    # TODO: want weight to track number of connections on each edge
    __slots__ = "src", "dest"
    src: Node
    dest: Node

    def __str__(self) -> str:
        """Makes the string representation an of the edge connection.

        Returns:
            str: Connection being represented by the edge in string form.
        """
        return f"{self.src} -> {self.dest}"


@dataclass
class Digraph:
    """Directed graph class.

    This class utilizes Node and Edge to store graph connections.

    Attributes:
        edges (dict[Node, list[Node]]): Connections stored as a dict of src
            node mapped to a list of connected nodes. Repeats allowed.
    """

    edges: dict[Node, list[Node]] = field(default_factory=dict)

    def add_node(self, node: Node):
        """Adds a new node to the graph.

        Re-adding an existing node is a no-op, preserving its adjacency list.

        Args:
            node (Node): New Node to add.
        """
        if node not in self.edges:
            self.edges[node] = []

    def add_edge(self, edge: Edge):
        """Adds a new edge/connection to the graph.

        Args:
            edge (Edge): Edge to add to the graph

        Raises:
            ValueError: Raises ValueError if either src or dest Nodes not in
                graph.
        """
        if not (edge.src in self.edges and edge.dest in self.edges):
            raise ValueError("Node not in graph")
        self.edges[edge.src].append(edge.dest)

    def children(self, node: Node) -> list[Node]:
        """Get's Nodes that `node` is linked to.

        Args:
            node (Node): Node to extract children from.

        Returns:
            list[Node]: List of Nodes connected to the target node.
                Duplicates allowed.
        """
        return self.edges[node]

    def has_node(self, node: Node) -> bool:
        """Checks if the graph contains a given Node.

        Args:
            node (Node): Node to search the graph for.

        Returns:
            bool: True if the node exists, otherwise False.
        """
        return node in self.edges

    def get_node(self, name: str) -> Node:
        """Gets a Node from the graph based on the Node's name attribute.

        Args:
            name (str): Name of the Node to return from the graph.

        Raises:
            NameError: If no matching Node can be found.

        Returns:
            Node: The Node matching the search name.
        """
        for node in self.edges:
            if node.name == name:
                return node
        raise NameError(name)

    def node_pairs(self) -> list[tuple[str, str]]:
        """Generates pairs of nodes representing edge connections.

        Duplicates allowed.

        Returns:
            list[tuple[str, str]]: List of node pairs.
        """
        return [
            (src.name, dest.name)
            for src in self.edges
            for dest in self.edges[src]
        ]

    # * start analytics section
    # * basics
    def vertex_count(self) -> int:
        """Counts the total number of Nodes in the graph.

        Returns:
            int: Number of Nodes in the graph.
        """
        return len(self.edges)

    def edge_count(self) -> int:
        """Counts all the edges in the graph. Duplicates are counted.

        Returns:
            int: Number of edges/connections in the graph.
        """
        return sum(len(self.children(node)) for node in self.edges)

    def vertex_degree(self, vertex: Union[Node, None] = None) -> Union[int, list[int]]:
        """Calculates the number of Nodes connected to the target vertex.

        If no target vertex provided, calculates connected Nodes for each
        Node in the graph.

        Args:
            vertex (Union[Node, None], optional): Target vertex to search
                for. Defaults to None.

        Returns:
            Union[int, list[int]]: Either the degree of the target vertex or
                an ascending sorted list of the degrees of all the nodes.
        """
        if vertex:
            return len(self.children(vertex))
        return sorted(len(self.children(node)) for node in self.edges)

    # * advanced
    def edge_rank(
        self, vertex: Union[Node, None] = None, limit: Union[int, None] = None
    ) -> list[tuple[tuple[str, str], int]]:
        """Ranks the edges based on their weight.

        Ranks are calculated either for the entire graph (default) or for the
        specified Node.

        Args:
            vertex (Union[Node, None], optional): Node to run calculation on.
                Defaults to None.
            limit (Union[int, None], optional): Limit for the number of edges
                returned. Defaults to None.

        Returns:
            list[tuple[tuple[str, str], int]]: Returns a sorted (by weight)
                list of edges up to the limit parameter.
        """
        # CustomCounter.most_common already treats limit=None as "no limit",
        # so a single call covers both the limited and unlimited cases.
        if vertex:
            pairs = [(vertex.name, partner.name) for partner in self.edges[vertex]]
            return CustomCounter(pairs).most_common(limit)
        return CustomCounter(self.node_pairs()).most_common(limit)


@dataclass
class Graph(Digraph):
    """Undirected sub-class of the Digraph."""

    def add_edge(self, edge: Edge):
        """Adds both (directed) edges to the graph."""
        Digraph.add_edge(self, edge)
        Digraph.add_edge(self, Edge(edge.dest, edge.src))
/scholar-network-0.2.6.tar.gz/scholar-network-0.2.6/src/scholar_network/models.py
0.917912
0.557905
models.py
pypi
# Scholar Scraper This is a simple script to scrape the Google Scholar page of given authors and extract all the information about them ( publications, cites per year, etc.). The script is written in Python 3 and uses the [`scholarly`](https://github.com/scholarly-python-package/scholarly) library. <br/> ## Installation The script can be installed through `pip` and PyPI: ```bash pip install scholar-scraper ``` Or via Github sources: ```bash pip install git+https://github.com/guillaume-elambert/Scholar-Scraper-Python-API.git ``` <br/> Or it can be installed by cloning the repository: ```bash git clone https://github.com/guillaume-elambert/Scholar-Scraper-Python-API.git cd Scholar-Scraper-Python-API pip install -e . ``` <br/> ## Usage The script is simple to use. It takes a list of authors Google Scholar IDs as input and outputs a JSON file with all the information about the authors. ```python from scholar_scraper import scholar_scraper # Define the list of authors Google Scholar IDs scholarIds = [ '1iQtvdsAAAAJ', 'dAKCYJgAAAAJ' ] # Start scraping and print the resulted JSON to the console print(scholar_scraper.start_scraping(scholarIds)) ``` <br/> To reduce the time needed to scrape the authors, the script uses multithreading (one thread per user).<br/> The number of threads can be set using the `max_threads` parameter, which default value 10 : ```python from scholar_scraper import scholar_scraper # Define the maximum number of threads to use max_threads = 5 # Define the list of authors Google Scholar IDs scholarIds = [ '1iQtvdsAAAAJ', 'dAKCYJgAAAAJ' ] # Start scraping with maximum 5 threads and print the resulted JSON to the console # Since the number of authors is 2, the number of threads used will be reduced to 2 # If the number of authors is 10, the number of threads used will be reduced to 5 # If you don't pass any value to the max_threads parameter, the default value of 10 will be used print(scholar_scraper.start_scraping(scholarIds, max_threads)) ``` <br/> ## 
Output example The output is a JSON file containing all the information about the authors.<br/> Here is an example of the simplified output for the authors with the Google Scholar IDs `1iQtvdsAAAAJ` and `dAKCYJgAAAAJ`: ```json [ { "affiliation": "Tenure-Track Assistant Professor, DIEM, University of Salerno", "citedby": 796, "cites_per_year": { "2013": 2, "2014": 9, "2015": 21, "2016": 34, "2017": 46, "2018": 83, "2019": 127, "2020": 124, "2021": 182, "2022": 140, "2023": 22 }, "coauthors": [ { "affiliation": "University of Salerno, Dept. of Information and Electrical Engineering and Applied Math (DIEM)", "name": "Prof. Mario Vento, Ph.D.", "scholar_id": "3PwXGpgAAAAJ" }, ], "interests": [ "Pattern Recognition", "Artificial Intelligence", "Graph Based Representation" ], "name": "Vincenzo Carletti, Ph.D.", "organization": 8098754970055108159, "publications": [ { "abstract": "Graph matching is essential in several fields that use structured information, such as biology, chemistry, social networks, knowledge management, document analysis and others. Except for special classes of graphs, graph matching has in the worst-case an exponential complexity; however, there are algorithms that show an acceptable execution time, as long as the graphs are not too large and not too dense. In this paper we introduce a novel subgraph isomorphism algorithm, VF3, particularly efficient in the challenging case of graphs with thousands of nodes and a high edge density. Its performance, both in terms of time and memory, has been assessed on a large dataset of 12,700 random graphs with a size up to 10,000 nodes, made publicly available. VF3 has been compared with four other state-of-the-art algorithms, and the huge experimentation required more than two years of processing time. 
The results …", "author": "Vincenzo Carletti and Pasquale Foggia and Alessia Saggese and Mario Vento", "author_pub_id": "1iQtvdsAAAAJ:uJ-U7cs_P_0C", "citation": "IEEE transactions on pattern analysis and machine intelligence 40 (4), 804-818, 2017", "cites_per_year": { "2018": 10, "2019": 20, "2020": 21, "2021": 28, "2022": 31, "2023": 4 }, "journal": "IEEE transactions on pattern analysis and machine intelligence", "num_citations": 114, "number": "4", "pages": "804-818", "pub_url": "https://ieeexplore.ieee.org/abstract/document/7907163/", "pub_year": 2017, "publisher": "IEEE", "title": "Challenging the time complexity of exact subgraph isomorphism for huge and dense graphs with VF3", "url_related_articles": "/scholar?oi=bibs&hl=en&oe=ASCII&q=related:8y95I731FVoJ:scholar.google.com/", "volume": "40" }, { "abstract": "The actual research activity at MIVIA (Machine Intelligence for recognition of Video, Images and Audio) lab involves the study of innovative methods for behavioral analysis in surveillance videos and for events detection in audio streams, the development of techniques for biomedical images analysis and algorithms for graph matching. 
Moreover, in the last years, part of the research activity of the MIVIA lab is dedicated to the implementation of those approaches on embedded systems.", "author": "Vincenzo Carletti and Luca Del Pizzo and Rosario Di Lascio and Pasquale Foggia and Gennaro Percannella and Alessia Saggese and Nicola Strisciuglio and Mario Vento", "author_pub_id": "1iQtvdsAAAAJ:eQOLeE2rZwMC", "citation": "", "cites_per_year": {}, "num_citations": 0, "pub_url": "https://girpr2014.unisa.it/files/MIVIA2014.pdf", "title": "Research Activities@ MIVIA Lab", "url_related_articles": "/scholar?oi=bibs&hl=en&oe=ASCII&q=related:fXoMTa-V5boJ:scholar.google.com/" } ], "scholar_id": "1iQtvdsAAAAJ" }, { "affiliation": "Laboratoire d'Informatique Fondamentale et Appliquée de Tours (Polytech' Tours)", "citedby": 3319, "cites_per_year": { "2004": 12, "2005": 37, "2006": 31, "2007": 74, "2008": 68, "2009": 90, "2010": 112, "2011": 142, "2012": 158, "2013": 219, "2014": 244, "2015": 254, "2016": 257, "2017": 256, "2018": 225, "2019": 280, "2020": 240, "2021": 292, "2022": 244, "2023": 28 }, "coauthors": [ { "affiliation": "Professor of Computer Engineering, University of Salerno", "name": "Pasquale Foggia", "scholar_id": "P9eeLD8AAAAJ" }, ], "homepage": "https://www.univ-tours.fr/m-donatello-conte--554674.kjsp", "interests": [ "Structural Pattern Recognition", "Graph Matching", "Video Surveillance Systems", "Image Quality Assessment", "Affective Computing" ], "name": "Donatello Conte", "organization": 9820397017780423431, "publications": [ { "abstract": "A recent paper posed the question: \"Graph Matching: What are we really talking about?\". Far from providing a definite answer to that question, in this paper we will try to characterize the role that graphs play within the Pattern Recognition field. To this aim two taxonomies are presented and discussed. The first includes almost all the graph matching algorithms proposed from the late seventies, and describes the different classes of algorithms. 
The second taxonomy considers the types of common applications of graph-based techniques in the Pattern Recognition and Machine Vision field.", "author": "Donatello Conte and Pasquale Foggia and Carlo Sansone and Mario Vento", "author_pub_id": "dAKCYJgAAAAJ:u5HHmVD_uO8C", "citation": "International journal of pattern recognition and artificial intelligence 18 …, 2004", "cites_per_year": { "2004": 10, "2005": 29, "2006": 25, "2007": 60, "2008": 50, "2009": 62, "2010": 78, "2011": 97, "2012": 115, "2013": 127, "2014": 147, "2015": 157, "2016": 152, "2017": 144, "2018": 121, "2019": 128, "2020": 108, "2021": 109, "2022": 84, "2023": 9 }, "journal": "International journal of pattern recognition and artificial intelligence", "num_citations": 1834, "number": "03", "pages": "265-298", "pub_url": "https://www.worldscientific.com/doi/abs/10.1142/S0218001404003228", "pub_year": 2004, "publisher": "World Scientific Publishing Company", "title": "Thirty years of graph matching in pattern recognition", "url_related_articles": "/scholar?oi=bibs&hl=en&oe=ASCII&q=related:Up-aWBC_pBYJ:scholar.google.com/", "volume": "18" }, { "abstract": "3 Lyon Research Center for Images and Information Systems UMR CNRS 5205 Bat. J. Verne INSA Lyon 69621 Villeurbanne Cedex, France jolion@ rfv. insa-lyon. fr", "author": "Donatello Conte and Pasquale Foggia and Jean-Michel Jolion and Mario Vento", "author_pub_id": "dAKCYJgAAAAJ:MXK_kJrjxJIC", "citation": "", "cites_per_year": {}, "num_citations": 0, "pub_url": "https://scholar.google.com/scholar?cluster=1028334572382745959&hl=en&oi=scholarr", "title": "pyramides de graphes", "url_related_articles": "/scholar?oi=bibs&hl=en&q=related:Z3UgYdhgRQ4J:scholar.google.com/" } ], "scholar_id": "dAKCYJgAAAAJ" } ] ```
/scholar-scraper-1.0.11.tar.gz/scholar-scraper-1.0.11/README.md
0.660939
0.930962
README.md
pypi
import json
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional

from scholarly import scholarly, ProxyGenerator

from .CustomScholarlyTypes import SimplifiedAuthor
from .utilities import JSONEncoder

# Maximum attempts for both proxy acquisition and per-author scraping.
MAX_RETRIES = 10


def set_new_proxy():
    """
    Set a new proxy for the scholarly library.

    :return: The proxy generator (returned even if no proxy could be set).
    """
    pg = ProxyGenerator()
    for _ in range(MAX_RETRIES):
        try:
            # FreeProxies() scrapes a public proxy list; both it and
            # use_proxy() can fail on a dead proxy, so retry a few times.
            if pg.FreeProxies() and scholarly.use_proxy(pg):
                break
        except Exception:
            # A bare `except:` would also swallow KeyboardInterrupt and
            # SystemExit; catch only ordinary errors and try the next proxy.
            pass
    return pg


def getAuthorData(scholarId: str):
    """
    Retrieve the author's data from Google Scholar.

    :param scholarId: The id of the author on Google Scholar.
    :return: The author's data.
    """
    # Retrieve the author's data
    author = scholarly.search_author_id(scholarId)
    # Cast the author to Author object
    return SimplifiedAuthor(author)


# Threaded function for queue processing.
def crawl(scholarID: str):
    """
    Crawl the author's data from Google Scholar, rotating proxies on failure.

    :param scholarID: A Google Scholar ID string.
    :return: The author's data or None if every attempt failed.
    """
    data = None
    # Try to get the data MAX_RETRIES times at most
    for _ in range(MAX_RETRIES):
        try:
            data = getAuthorData(scholarID)
            break
        except Exception:
            # Most failures are blocked/dead proxies; rotate and retry.
            set_new_proxy()
    return data


class ScholarScraper:
    """
    :class:`ScholarScraper <ScholarScraper>` object used to retrieve the data
    of a list of authors from Google Scholar.
    """

    def __init__(self, scholarIds: Optional[List[str]] = None, max_threads: int = 10):
        """
        :param scholarIds: The list of the ids of the authors on Google Scholar.
        :param max_threads: The maximum number of threads to use for the scraping process.
        """
        # A literal `[]` default would be a shared mutable default argument;
        # use None as the sentinel instead.
        self.scholarIds = scholarIds if scholarIds is not None else []
        self.max_threads = max_threads
        self.authorsList = []
        self.threads = []

    def start_scraping(self, scholarIds: List[str] = None, max_threads: int = None):
        """
        Start the scraping process.

        :param scholarIds: The list of the ids of the authors on Google Scholar.
        :param max_threads: The maximum number of threads to use for the scraping process.
        :return: The list of the authors' data as JSON.
        """
        self.authorsList = []
        self.threads = []
        self.scholarIds = scholarIds if scholarIds else self.scholarIds
        self.max_threads = max_threads if max_threads else self.max_threads

        if self.max_threads == 1:
            # Sequential path: no executor overhead for a single worker.
            for scholarId in self.scholarIds:
                result = crawl(scholarId)
                # Skip failed authors, matching the threaded path below.
                if result is not None:
                    self.authorsList.append(result)
            return self._as_json()

        # Use many threads (self.max_threads max, or one for each scholarId)
        num_threads = min(self.max_threads, len(self.scholarIds))
        with ThreadPoolExecutor(max_workers=num_threads) as executor:
            # Submit the crawl function to the thread pool executor with each
            # work item, then collect the results in submission order.
            futures = [executor.submit(crawl, work) for work in self.scholarIds]
            for future in futures:
                result = future.result()
                if result is not None:
                    self.authorsList.append(result)
        return self._as_json()

    def _as_json(self) -> str:
        """Serialize the collected author data to a pretty-printed JSON string."""
        return json.dumps(self.authorsList, cls=JSONEncoder, sort_keys=True,
                          indent=4, ensure_ascii=False)
/scholar-scraper-1.0.11.tar.gz/scholar-scraper-1.0.11/scholar_scraper/ScholarScraper.py
0.793786
0.373533
ScholarScraper.py
pypi
[![Python package](https://github.com/scholarly-python-package/scholarly/workflows/Python%20package/badge.svg?branch=main)](https://github.com/scholarly-python-package/scholarly/actions?query=branch%3Amain) [![codecov](https://codecov.io/gh/scholarly-python-package/scholarly/branch/main/graph/badge.svg?token=0svtI9yVSQ)](https://codecov.io/gh/scholarly-python-package/scholarly) [![Documentation Status](https://readthedocs.org/projects/scholarly/badge/?version=latest)](https://scholarly.readthedocs.io/en/latest/?badge=latest) [![DOI](https://zenodo.org/badge/27442991.svg)](https://zenodo.org/badge/latestdoi/27442991) # scholarly scholarly is a module that allows you to retrieve author and publication information from [Google Scholar](https://scholar.google.com) in a friendly, Pythonic way without having to solve CAPTCHAs. ## Installation [![Anaconda-Server Badge](https://anaconda.org/conda-forge/scholarly/badges/version.svg)](https://anaconda.org/conda-forge/scholarly) [![PyPI version](https://badge.fury.io/py/scholarly.svg)](https://badge.fury.io/py/scholarly) `scholarly` can be installed either with `conda` or with `pip`. To install using `conda`, simply run ```bash conda install -c conda-forge scholarly ``` Alternatively, use `pip` to install the latest release from pypi: ```bash pip3 install scholarly ``` or `pip` to install from github: ```bash pip3 install -U git+https://github.com/scholarly-python-package/scholarly.git ``` We are constantly developing new features. Please update your local package regularly. `scholarly` follows [Semantic Versioning](https://semver.org/). This means your code that uses an earlier version of `scholarly` is guaranteed to work with newer versions. ### Optional dependencies - **Tor**: `scholarly` comes with a handful of APIs to set up proxies to circumvent anti-bot measures. Tor methods are deprecated since v1.5 and are not actively tested or supported. 
If you wish to use Tor, install `scholarly` using the `tor` tag as ```bash pip3 install scholarly[tor] ``` If you use `zsh` (which is now the default in latest macOS), you should type this as ```zsh pip3 install scholarly'[tor]' ``` **Note:** Tor option is unavailable with conda installation. ## Tests To check if your installation is successful, run the tests by executing the `test_module.py` file as: ```bash python3 test_module ``` or ```bash python3 -m unittest -v test_module.py ``` ## Documentation Check the [documentation](https://scholarly.readthedocs.io/en/latest/?badge=latest) for a [complete API reference](https://scholarly.readthedocs.io/en/stable/scholarly.html) and a [quickstart guide](https://scholarly.readthedocs.io/en/stable/quickstart.html). ### Examples ```python from scholarly import scholarly # Retrieve the author's data, fill-in, and print # Get an iterator for the author results search_query = scholarly.search_author('Steven A Cholewiak') # Retrieve the first result from the iterator first_author_result = next(search_query) scholarly.pprint(first_author_result) # Retrieve all the details for the author author = scholarly.fill(first_author_result ) scholarly.pprint(author) # Take a closer look at the first publication first_publication = author['publications'][0] first_publication_filled = scholarly.fill(first_publication) scholarly.pprint(first_publication_filled) # Print the titles of the author's publications publication_titles = [pub['bib']['title'] for pub in author['publications']] print(publication_titles) # Which papers cited that publication? citations = [citation['bib']['title'] for citation in scholarly.citedby(first_publication_filled)] print(citations) ``` **IMPORTANT**: Making certain types of queries, such as `scholarly.citedby` or `scholarly.search_pubs`, will lead to Google Scholar blocking your requests and may eventually block your IP address. You must use proxy services to avoid this situation.
See the ["Using proxies" section](https://scholarly.readthedocs.io/en/stable/quickstart.html#using-proxies) in the documentation for more details. Here's a short example: ```python from scholarly import ProxyGenerator # Set up a ProxyGenerator object to use free proxies # This needs to be done only once per session pg = ProxyGenerator() pg.FreeProxies() scholarly.use_proxy(pg) # Now search Google Scholar from behind a proxy search_query = scholarly.search_pubs('Perception of physical stability and center of mass of 3D objects') scholarly.pprint(next(search_query)) ``` `scholarly` also has APIs that work with several premium (paid) proxy services. `scholarly` is smart enough to know which queries need proxies and which do not. It is therefore recommended to always set up a proxy in the beginning of your application. #### Disclaimer The developers use `ScraperAPI` to run the tests in Github Actions. The developers of `scholarly` are not affiliated with any of the proxy services and do not profit from them. If your favorite service is not supported, please submit an issue or even better, follow it up with a pull request. ## Contributing We welcome contributions from you. Please create an issue, fork this repository and submit a pull request. Read the [contributing document](.github/CONTRIBUTING.md) for more information. ## Acknowledging `scholarly` If you have used this codebase in a scientific publication, please cite this software as following: ```bibtex @software{cholewiak2021scholarly, author = {Cholewiak, Steven A. 
and Ipeirotis, Panos and Silva, Victor and Kannawadi, Arun}, title = {{SCHOLARLY: Simple access to Google Scholar authors and citation using Python}}, year = {2021}, doi = {10.5281/zenodo.5764801}, license = {Unlicense}, url = {https://github.com/scholarly-python-package/scholarly}, version = {1.5.1} } ``` ## License The original code that this project was forked from was released by [Luciano Bello](https://github.com/lbello/chalmers-web) under a [WTFPL](http://www.wtfpl.net/) license. In keeping with this mentality, all code is released under the [Unlicense](http://unlicense.org/).
/scholarly-1.7.10.tar.gz/scholarly-1.7.10/README.md
0.708313
0.968381
README.md
pypi
import sys import json def walk(x, action, format, meta): """Walk a tree, applying an action to every object. Returns a modified tree. """ if isinstance(x, list): array = [] for item in x: if isinstance(item, dict) and 't' in item: res = action(item['t'], item['c'], format, meta) if res is None: array.append(walk(item, action, format, meta)) elif isinstance(res, list): for z in res: array.append(walk(z, action, format, meta)) else: array.append(walk(res, action, format, meta)) else: array.append(walk(item, action, format, meta)) return array elif isinstance(x, dict): obj = {} for k in x: obj[k] = walk(x[k], action, format, meta) return obj else: return x def toJSONFilter(action): """Converts an action into a filter that reads a JSON-formatted pandoc document from stdin, transforms it by walking the tree with the action, and returns a new JSON-formatted pandoc document to stdout. The argument is a function action(key, value, format, meta), where key is the type of the pandoc object (e.g. 'Str', 'Para'), value is the contents of the object (e.g. a string for 'Str', a list of inline elements for 'Para'), format is the target output format (which will be taken for the first command line argument if present), and meta is the document's metadata. If the function returns None, the object to which it applies will remain unchanged. If it returns an object, the object will be replaced. If it returns a list, the list will be spliced in to the list to which the target object belongs. (So, returning an empty list deletes the object.) """ doc = json.loads(sys.stdin.read()) if len(sys.argv) > 1: format = sys.argv[1] else: format = "" altered = walk(doc, action, format, doc[0]['unMeta']) json.dump(altered, sys.stdout) def stringify(x): """Walks the tree x and returns concatenated string content, leaving out all formatting. 
""" result = [] def go(key, val, format, meta): if key in ['Str', 'MetaString']: result.append(val) elif key == 'Code': result.append(val[1]) elif key == 'Math': result.append(val[1]) elif key == 'NumRef': result.append(val[1]) elif key == 'LineBreak': result.append(" ") elif key == 'Space': result.append(" ") walk(x, go, "", {}) return ''.join(result) def attributes(attrs): """Returns an attribute list, constructed from the dictionary attrs. """ attrs = attrs or {} ident = attrs.get("id", "") classes = attrs.get("classes", []) keyvals = [[x, attrs[x]] for x in attrs if (x != "classes" and x != "id")] return [ident, classes, keyvals] def elt(eltType, numargs): def fun(*args): lenargs = len(args) if lenargs != numargs: raise ValueError(eltType + ' expects ' + str(numargs) + ' arguments, but given ' + str(lenargs)) if numargs == 0: xs = [] elif len(args) == 1: xs = args[0] else: xs = args return {'t': eltType, 'c': xs} return fun # Constructors for block elements Plain = elt('Plain', 1) Para = elt('Para', 1) CodeBlock = elt('CodeBlock', 2) RawBlock = elt('RawBlock', 2) BlockQuote = elt('BlockQuote', 1) OrderedList = elt('OrderedList', 2) BulletList = elt('BulletList', 1) DefinitionList = elt('DefinitionList', 1) Header = elt('Header', 3) HorizontalRule = elt('HorizontalRule', 0) Table = elt('Table', 5) Figure = elt('Figure', 5) ImageGrid = elt('ImageGrid', 1) Statement = elt('Statement', 2) Proof = elt('Proof', 2) Div = elt('Div', 2) Null = elt('Null', 0) # Constructors for inline elements Str = elt('Str', 1) Emph = elt('Emph', 1) Strong = elt('Strong', 1) Strikeout = elt('Strikeout', 1) Superscript = elt('Superscript', 1) Subscript = elt('Subscript', 1) SmallCaps = elt('SmallCaps', 1) Quoted = elt('Quoted', 2) Cite = elt('Cite', 2) NumRef = elt('NumRef', 2) Code = elt('Code', 2) Space = elt('Space', 0) LineBreak = elt('LineBreak', 0) Math = elt('Math', 2) RawInline = elt('RawInline', 2) Link = elt('Link', 2) Image = elt('Image', 3) Note = elt('Note', 1) Span = 
elt('Span', 2)
/scholdocfilters-0.1.1.tar.gz/scholdocfilters-0.1.1/scholdocfilters.py
0.526099
0.431045
scholdocfilters.py
pypi
from math import pi as _pi
from ._if_not_valid_raise import (_if_not_int_or_float_raise,
                                  _if_not_positive_raise)


def circle_area(r):
    """Calculates the area of a circle using the formula:
        area = \u03C0(radius squared)

    Parameters
    ----------
    r: int or float
        The radius in the equation.

    Returns
    -------
    Float
        \u03C0 * (r**2)

    Raises
    ------
    ValueError
        If r::
            Is not an integer or float.
            Is not positive.

    Examples
    --------
    >>> school_algorithms.circle_area(10)
    314.1592653589793
    """
    _if_not_int_or_float_raise(r)
    _if_not_positive_raise(r)
    return _pi * (r**2)


def circumference(r):
    """Calculates the circumference of a circle using the formula:
        2\u03C0radius

    Parameters
    ----------
    r: int or float
        The radius in the equation.

    Returns
    -------
    Float
        \u03C0 * r * 2

    Raises
    ------
    ValueError
        If r::
            Is not an integer or float.
            Is not positive.

    Examples
    --------
    >>> school_algorithms.circumference(5)
    31.41592653589793
    """
    _if_not_int_or_float_raise(r)
    _if_not_positive_raise(r)
    return _pi * r * 2


def circumference2(d):
    """Calculates the circumference of a circle using the formula:
        \u03C0 * diameter

    Parameters
    ----------
    d: int or float
        The diameter in the equation.

    Returns
    -------
    Float
        \u03C0 * d

    Raises
    ------
    ValueError
        If d::
            Is not an integer or float.
            Is not positive.

    Examples
    --------
    >>> school_algorithms.circumference2(10)
    31.41592653589793
    """
    _if_not_int_or_float_raise(d)
    _if_not_positive_raise(d)
    return _pi * d


def area_of_sector(r, a):
    """Calculates the area of a sector in a circle using the formula:
        angle/360 * \u03C0 * (r squared)

    Parameters
    ----------
    r: int or float
        The radius in the equation.
    a: int or float
        The angle in the equation.

    Returns
    -------
    Float
        a/360 * \u03C0 * r**2

    Raises
    ------
    ValueError
        If a or r::
            Is not an integer or float.
            Is not positive.

    Examples
    --------
    >>> school_algorithms.area_of_sector(5, 40)
    8.726646259971647
    """
    _if_not_int_or_float_raise(r, a)
    _if_not_positive_raise(r, a)
    return a/360 * _pi * r**2
/school_algorithms-1.14.0.tar.gz/school_algorithms-1.14.0/src/school_algorithms/circle.py
0.950881
0.549338
circle.py
pypi
from ._if_not_valid_raise import _if_not_int_or_float_raise


def power_calc(E, t):
    """Calculates power from energy and time using the formula:
        power = energy / time

    Parameters
    ----------
    E : int or float
        The energy value in the equation.
    t : int or float
        The time value of the equation (seconds).

    Returns
    -------
    Float
        E / t

    Raises
    ------
    ValueError
        If E or t is not an integer or float.

    Examples
    --------
    >>> school_algorithms.power_calc(10, 5)
    2.0
    """
    _if_not_int_or_float_raise(E, t)
    return E / t


def energy_calc(p, t):
    """Calculates energy from power and time using the formula:
        energy = power * time

    Parameters
    ----------
    p: Int or float
        The power value of the equation.
    t: Int or float
        The time value of the equation (seconds).

    Returns
    -------
    Int
        p * t

    Raises
    ------
    ValueError
        If p or t is not an integer or float.

    Examples
    --------
    >>> school_algorithms.energy_calc(5, 2)
    10
    """
    _if_not_int_or_float_raise(p, t)
    return p * t


def time_calc(p, E):
    """Calculates time from power and energy using the formula:
        time = energy / power

    Parameters
    ----------
    p: int or float
        The power value of the equation.
    E: int or float
        The energy value of the equaton.

    Returns
    -------
    Float
        E / p

    Raises
    ------
    ValueError
        If p or E is not an integer or float.

    Examples
    --------
    >>> school_algorithms.time_calc(2, 10)
    5.0
    """
    _if_not_int_or_float_raise(p, E)
    return E / p


def epe_calc(k, e):
    """Calculates Elastic Potential Energy using the formula:
        E = 0.5(spring constant(extension squared))

    Parameters
    ----------
    k: int or float
        The spring constant in the equation.
    e: int or float
        The extension in the equation.

    Returns
    -------
    Float
        0.5 * (k * (e**2))

    Raises
    ------
    ValueError
        If k or e is not an integer or a float.

    Examples
    --------
    >>> school_algorithms.epe_calc(5, 10)
    250.0
    """
    _if_not_int_or_float_raise(k, e)
    return 0.5 * (k * (e**2))


def kinetic_calc(s, m):
    """Calculates kinetic energy using the formula:
        kinetic energy = 0.5 \u00d7 mass \u00d7 (speed)^2

    Parameters
    ----------
    s: int or float
        The speed value in the equation.
    m: int or float
        The mass value in the equation.

    Returns
    -------
    Float
        0.5 * m * s**2

    Raises
    ------
    ValueError
        If m or s is not an integer or a float.

    Examples
    --------
    >>> school_algorithms.kinetic_calc(5, 10)
    125.0
    """
    _if_not_int_or_float_raise(m, s)
    return 0.5 * m * s**2
/school_algorithms-1.14.0.tar.gz/school_algorithms-1.14.0/src/school_algorithms/physics.py
0.960352
0.774711
physics.py
pypi
__author__ = "Marc-Olivier Derouin"
__email__ = "[email protected]"

import re
import unittest
from abc import ABC, abstractmethod
from typing import List, Tuple, Type

# Matches signed decimal numbers such as "-5", "+3.14" or ".5".
# The non-capturing group is essential: the previous pattern
# r'[-+]?\d*\.\d+|\d+' bound the optional sign to the *first* alternative
# only, so negative integers silently lost their sign ("-5" parsed as 5).
_NUMBER_PATTERN = re.compile(r'[-+]?(?:\d*\.\d+|\d+)')


class Equality(ABC):
    """Abstract base class for equality definitions."""

    def __init__(self, expected: str):
        """Initialize the class with the expected value.

        Args:
            expected: The expected value as a string.
        """
        self.expected = expected

    @abstractmethod
    def validate(self, test_case: unittest.TestCase, value_to_test: str, fail_message: str) -> None:
        """Validate equality between `value_to_test` and the expected value.

        Args:
            test_case: Instance of `unittest.TestCase` to run the equality check.
            value_to_test: The value to test for equality.
            fail_message: The message to display if the equality check fails.
        """


class AlmostEqual(Equality):
    """Abstract base class for almost equal validation."""


class AlmostEqualString(AlmostEqual):
    """Class for almost equal string validation (Levenshtein distance)."""

    def __init__(self, expected: str, max_distance: int = 2):
        """Initialize the class with the expected value and distance bound.

        Args:
            expected: The expected value as a string.
            max_distance: Maximum allowed Levenshtein distance. Defaults to 2.
        """
        super().__init__(expected)
        self.max_distance = max_distance

    @staticmethod
    def _levenshtein(s1: str, s2: str) -> int:
        """Calculate the Levenshtein distance between two strings.

        Args:
            s1: The first string.
            s2: The second string.

        Returns:
            The Levenshtein distance between `s1` and `s2`.
        """
        # Dynamic-programming table of size (len(s1)+1) x (len(s2)+1).
        matrix = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]

        # First row/column: distance from the empty string.
        for i in range(len(s1) + 1):
            matrix[i][0] = i
        for j in range(len(s2) + 1):
            matrix[0][j] = j

        for i in range(1, len(s1) + 1):
            for j in range(1, len(s2) + 1):
                cost = 0 if s1[i - 1] == s2[j - 1] else 1
                matrix[i][j] = min(
                    matrix[i - 1][j] + 1,        # deletion
                    matrix[i][j - 1] + 1,        # insertion
                    matrix[i - 1][j - 1] + cost  # substitution
                )

        # Bottom-right corner holds the full-string distance.
        return matrix[-1][-1]

    def validate(self, test_case: unittest.TestCase, value_to_test: str, fail_message: str) -> None:
        """Validate that `value_to_test` is within `max_distance` edits of the expected value.

        Args:
            test_case: Instance of `unittest.TestCase` to run the equality check.
            value_to_test: The value to test for equality.
            fail_message: The message to display if the equality check fails.
        """
        test_case.assertLessEqual(AlmostEqualString._levenshtein(value_to_test, self.expected),
                                  self.max_distance,
                                  fail_message)


class AlmostEqualNumber(AlmostEqual):
    """Class for almost equal numerical validation."""

    def __init__(self, expected: str, precision: int = 7):
        """Initialize the class with the expected value and the precision.

        Args:
            expected: The expected value as a string.
            precision: The number of decimal places to compare. Defaults to 7.
        """
        super().__init__(expected)
        self.precision = precision

    def validate(self, test_case: unittest.TestCase, value_to_test: str, fail_message: str) -> None:
        """Validate that every number found in `value_to_test` almost equals
        the corresponding number in the expected value.

        Args:
            test_case: Instance of `unittest.TestCase` to run the equality check.
            value_to_test: The value to test for equality.
            fail_message: The message to display if the equality check fails.

        Raises:
            AssertionError: If the two strings contain a different count of numbers.
        """
        expected_numbers = [float(m) for m in _NUMBER_PATTERN.findall(self.expected)]
        result_numbers = [float(m) for m in _NUMBER_PATTERN.findall(value_to_test)]
        if len(expected_numbers) != len(result_numbers):
            raise AssertionError(f"Expected {len(expected_numbers)} numbers, but got {len(result_numbers)}")
        for expected_number, result_number in zip(expected_numbers, result_numbers):
            test_case.assertAlmostEqual(expected_number, result_number, self.precision, fail_message)


class Equal(Equality):
    """Abstract base class for exact-equality validation after normalization."""

    @staticmethod
    @abstractmethod
    def alter_expected_and_value_to_test(expected: str, value_to_test: str) -> Tuple[str, str]:
        """Normalize the expected and value_to_test values before the equality check.

        Args:
            expected: The expected value.
            value_to_test: The value to test for equality.

        Returns:
            A tuple containing the expected and value_to_test values after alteration.
        """

    def validate(self, test_case: unittest.TestCase, value_to_test: str, fail_message: str) -> None:
        """Validate equality after normalizing both sides.

        Args:
            test_case: Instance of `unittest.TestCase` to run the equality check.
            value_to_test: The value to test for equality.
            fail_message: The message to display if the equality check fails.
        """
        expected, value_to_test = self.alter_expected_and_value_to_test(self.expected, value_to_test)
        test_case.assertEqual(expected, value_to_test, fail_message)


def CombineEqualities(*equalities: Equal) -> Type[Equal]:
    """Combine multiple equalities into a single equality class.

    Args:
        *equalities: Equal instances whose normalizations are applied in order.

    Returns:
        An Equal subclass whose normalization chains all of the provided equalities.

    Raises:
        TypeError: If any argument is not an instance of Equal.
    """
    # Fail fast: validate the arguments once, at combination time, instead
    # of on every later validate() call.
    for equality in equalities:
        if not isinstance(equality, Equal):
            raise TypeError(f"Expected an instance of Equal, but got {type(equality)}")

    class CombinedEquality(Equal):
        """Class for combined equality validation."""

        @staticmethod
        def alter_expected_and_value_to_test(expected: str, value_to_test: str) -> Tuple[str, str]:
            """Apply every combined normalization, in order, to both values.

            Args:
                expected: The expected value.
                value_to_test: The value to test for equality.

            Returns:
                A tuple containing the expected and value_to_test values after alteration.
            """
            for equality in equalities:
                expected, value_to_test = equality.alter_expected_and_value_to_test(expected, value_to_test)
            return expected, value_to_test

    return CombinedEquality


class CaseInsensitiveStringEquality(Equal):
    """Class for case-insensitive string equality validation."""

    @staticmethod
    def alter_expected_and_value_to_test(expected: str, value_to_test: str) -> Tuple[str, str]:
        """Normalize both values by converting them to lowercase.

        Args:
            expected: The expected value.
            value_to_test: The value to test for equality.

        Returns:
            A tuple containing both values lowercased.
        """
        return expected.lower(), value_to_test.lower()


class WhiteSpaceInsensitiveEquality(Equal):
    """Class for whitespace-insensitive equality validation."""

    @staticmethod
    def alter_expected_and_value_to_test(expected: str, value_to_test: str) -> Tuple[str, str]:
        """Normalize both values by removing all whitespace.

        Args:
            expected: The expected value.
            value_to_test: The value to test for equality.

        Returns:
            A tuple containing both values with all whitespace removed.
        """
        return re.sub(r"\s+", "", expected), re.sub(r"\s+", "", value_to_test)


class ContainsEquality(Equal):
    """Class for contains equality validation.

    Checks whether the expected value is contained in the value to test.
    """

    @staticmethod
    def _find_largest_substring(word: str, target: str) -> str:
        """Return the longest common substring of `word` and `target`."""
        word_length = len(word)
        target_length = len(target)

        # Dynamic-programming table of common-suffix lengths.
        table: List[List[int]] = [[0] * (target_length + 1) for _ in range(word_length + 1)]

        # Track the longest common substring seen so far (length + end in `word`).
        max_length = 0
        end_index = 0

        for i in range(1, word_length + 1):
            for j in range(1, target_length + 1):
                if word[i - 1] == target[j - 1]:
                    table[i][j] = table[i - 1][j - 1] + 1
                    if table[i][j] > max_length:
                        max_length = table[i][j]
                        end_index = i

        return word[end_index - max_length:end_index]

    @staticmethod
    def alter_expected_and_value_to_test(expected: str, value_to_test: str) -> Tuple[str, str]:
        """Normalize for containment: if `expected` occurs in `value_to_test`,
        both sides become `expected` (so the equality check passes); otherwise
        the right side becomes the longest common substring, which makes the
        failure message show how close the match came.

        Args:
            expected: The expected value.
            value_to_test: The value to test for equality.

        Returns:
            A tuple containing the expected and value_to_test values after alteration.
        """
        if expected in value_to_test:
            return expected, expected
        return expected, ContainsEquality._find_largest_substring(value_to_test, expected)
/school_grader-4.0.9.tar.gz/school_grader-4.0.9/src/school_grader/equality.py
0.927429
0.779867
equality.py
pypi
class APIError(Exception): """Обработка всех типов ошибок""" NAME = "APIError" def __init__(self, url: str, status_code: int, description: str | None = None) -> None: error_text = f"API-Error | {status_code}:{self.NAME}:\nURL: {url}" if description: error_text += f"\n{description}" super().__init__(error_text) self.url = url self.status_code = status_code self.description = description class HTMLError(APIError): """HTML отраженная ошибка.""" NAME = "HTMLError" class InvalidRequest(APIError): """Неверный запрос""" NAME = "InvalidRequest" class ParameterInvalid(APIError): """Неверное значение одного из параметров""" NAME = "ParameterInvalid" class ApiResourceUnavailable(APIError): """Ресурс не существует""" NAME = "ApiResourceUnavailable" class ApiMethodNotSupported(APIError): """Метод не поддерживается для этого ресурса""" NAME = "ApiMethodNotSupported" class ApiRequestLimit(APIError): """Превышен лимит запросов для данного токена""" NAME = "ApiRequestLimit" class ApiServerError(APIError): """Ошибка на сервере""" NAME = "ApiServerError" class ApiHttpsRequired(APIError): """Ресурс доступен только через https""" NAME = "ApiHttpsRequired" class AuthorizationInvalidToken(APIError): """Неверный или неактивный токен""" NAME = "AuthorizationInvalidToken" class AuthorizationTokenRequired(APIError): """Для доступа к ресурсу нужна авторизация""" NAME = "AuthorizationTokenRequired" class AuthorizationOutOfScope(APIError): """Токен содержит недостаточно прав доступа """ NAME = "AuthorizationOutOfScope" class AuthorizationNotOwner(APIError): """Текущий пользователь не является владельцем ресурса""" NAME = "AuthorizationNotOwner" class AuthorizationOwnerForbidden(APIError): """Данный запрос запрещён владельцем ресурса""" NAME = "AuthorizationOwnerForbidden" class AuthorizationSystemForbidden(APIError): """Данный запрос запрещён правилами доступа системы""" NAME = "AuthorizationSystemForbidden" all_error_types_str = [ 'HTMLError', 'InvalidRequest', 'ParameterInvalid', 
'ApiResourceUnavailable', 'ApiMethodNotSupported', 'ApiRequestLimit', 'ApiServerError', 'ApiHttpsRequired', 'AuthorizationInvalidToken', 'AuthorizationTokenRequired', 'AuthorizationOutOfScope', 'AuthorizationNotOwner', 'AuthorizationOwnerForbidden', 'AuthorizationSystemForbidden', ] all_error_types_str_ = [ 'HTMLError', 'invalidRequest', 'parameterInvalid', 'apiResourceUnavailable', 'apiMethodNotSupported', 'apiRequestLimit', 'apiServerError', 'apiHttpsRequired', 'authorizationInvalidToken', 'authorizationTokenRequired', 'authorizationOutOfScope', 'authorizationNotOwner', 'authorizationOwnerForbidden', 'authorizationSystemForbidden', 'invalidToken', 'tokenRequired', 'outOfScope', 'notOwner', 'ownerForbidden', 'systemForbidden', 'resourceUnavailable', 'methodNotSupported', 'requestLimit', 'serverError', 'httpsRequired', ] all_error_types_str__dict = { 'HTMLError': "HTMLError", 'invalidRequest': "InvalidRequest", 'parameterInvalid': "ParameterInvalid", 'apiResourceUnavailable': "ApiResourceUnavailable", 'apiMethodNotSupported': "ApiMethodNotSupported", 'apiRequestLimit': "ApiRequestLimit", 'apiServerError': "ApiServerError", 'apiHttpsRequired': "ApiHttpsRequired", 'authorizationInvalidToken': "AuthorizationInvalidToken", 'authorizationTokenRequired': "AuthorizationTokenRequired", 'authorizationOutOfScope': "AuthorizationOutOfScope", 'authorizationNotOwner': "AuthorizationNotOwner", 'authorizationOwnerForbidden': "AuthorizationOwnerForbidden", 'authorizationSystemForbidden': "AuthorizationSystemForbidden", 'invalidToken': "AuthorizationInvalidToken", 'tokenRequired': "AuthorizationTokenRequired", 'outOfScope': "AuthorizationOutOfScope", 'notOwner': "AuthorizationNotOwner", 'ownerForbidden': "AuthorizationOwnerForbidden", 'systemForbidden': "AuthorizationSystemForbidden", 'resourceUnavailable': "ApiResourceUnavailable", 'methodNotSupported': "ApiMethodNotSupported", 'requestLimit': "ApiRequestLimit", 'serverError': "ApiServerError", 'httpsRequired': "ApiHttpsRequired", 
"unknownError": "UnknownError", } all_error_types = { "HTMLError": HTMLError, "InvalidRequest": InvalidRequest, "ParameterInvalid": ParameterInvalid, "ApiResourceUnavailable": ApiResourceUnavailable, "ApiMethodNotSupported": ApiMethodNotSupported, "ApiRequestLimit": ApiRequestLimit, "ApiServerError": ApiServerError, "ApiHttpsRequired": ApiHttpsRequired, "AuthorizationInvalidToken": AuthorizationInvalidToken, "AuthorizationTokenRequired": AuthorizationTokenRequired, "AuthorizationOutOfScope": AuthorizationOutOfScope, "AuthorizationNotOwner": AuthorizationNotOwner, "AuthorizationOwnerForbidden": AuthorizationOwnerForbidden, "AuthorizationSystemForbidden": AuthorizationSystemForbidden, "UnknownError": APIError } def raise_error(url: str, status_code: int, error_type: str, description: str | None = None): raise all_error_types[all_error_types_str__dict[error_type]](url, status_code, description)
/school_mosreg_api-0.9.4.tar.gz/school_mosreg_api-0.9.4/school_mosreg_api/exceptions.py
0.471467
0.173533
exceptions.py
pypi
from datetime import date, datetime from typing import Any, Optional from .base import BaseAPI from .. import types class SchoolMosregRUAPI(BaseAPI): """Основной sync класс почти со всеми функциями API.\n~~~""" def check_person(self, value): if value == "me": return (self.get_user()).personId else: return value def check_user(self, value): if value == "me": return (self.get_user()).id else: return value def get_me_organizations(self) -> Optional[list[int]]: """[GET] users/me/organizations Список идентификаторов организаций текущего пользователя Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Authorities/Authorities_GetOwnOrganizations """ return self.get("users/me/organizations", return_json=True) def get_organization(self, organizationId: int | str) -> types.Organization: """[GET] users/me/organizations/{organizationId} Данные указанной организации пользователя. Параметры: organizationId: Идентификатор организации (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Authorities/Authorities_GetOrganizationInfo """ return self.get(f"users/me/organizations/{organizationId}", model=types.Organization) def get_token_with_code(self, code: str, client_id: str, client_secret: str, grant_type: str, refreshToken: str) -> types.TokenWithCode: """[POST] authorizations Обменять код доступа на токен Параметры: code: ``str`` client_id: ``str`` client_secret: ``str`` grant_type: ``str`` refreshToken: ``str`` Параметры запроса [POST] method="authorizations": data: Код доступа -> Пример: ``{`` ``"code": "string",`` ``"client_id": "00000000-0000-0000-0000-000000000000",`` ``"client_secret": "00000000-0000-0000-0000-000000000000",`` ``"grant_type": "NotSet",`` ``"refreshToken": "string"`` ``}`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Authorizations/Authorizations_PostTokenRequestCode """ return self.post("authorizations", model=types.TokenWithCode, 
data={ "code": code, "client_id": client_id, "client_secret": client_secret, "grant_type": grant_type, "refreshToken": refreshToken}) def get_person_avg_marks(self, person: int | str, period) -> Optional[str]: """[GET] persons/{person}/reporting-periods/{period}/avg-mark Оценка персоны за отчетный период Параметры: person: id персоны (``"me"``, для себя) period: id отчетного периода Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_GetByPersonAndPeriod """ return self.get(f"persons/{self.check_person(person)}/reporting-periods/{period}/avg-mark", return_json=True) def get_person_avg_marks_by_subject(self, person: int | str, period, subject) -> Optional[str]: """[GET] persons/{person}/reporting-periods/{period}/subjects/{subject}/avg-mark Оценка персоны по предмету за отчетный период Параметры: person: id персоны (``"me"``, для себя) period: id отчетного периода subject: id предмета Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_GetByPersonAndPeriodAndSubject """ return self.get(f"persons/{self.check_person(person)}/reporting-periods/{period}/subjects/{subject}/avg-mark", return_json=True) def get_eduGroup_avg_marks_to_date(self, group: int | str, period: int | str, date: datetime | date) -> Optional[list[dict[str, Any]]]: """[GET] edu-groups/{group}/reporting-periods/{period}/avg-marks/{date} Оценки учебной группы по предмету за отчетный период до определенной даты Параметры: group: id учебной группы (``EduGroup``) (``int`` / ``str``) period: id отчетного периода (``int`` / ``str``) date: дата (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_GetByGroupAndPeriodOnDate """ return self.get(f"edu-groups/{group}/reporting-periods/{period}/avg-marks/{self.datetime_to_string(date)}", return_json=True) def 
get_eduGroup_avg_marks(self, group: int | str, from_: datetime | date, to: datetime | date) -> Optional[list[dict[str, Any]]]: """[GET] edu-groups/{group}/avg-marks/{from}/{to} Оценки учебной группы за период Параметры: group: id учебной группы (``EduGroup``) (``int`` / ``str``) from_: id начало периода (``datetime.datetime`` / ``datetime.date``) to: id конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_ListByGroupAndDates """ return self.get(f"edu-groups/{group}/avg-marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", return_json=True) def get_user_childrens(self, userID: str | int = "me") -> Optional[list[types.Children]]: """[GET] user/{userID}/children Получение списка детей по идентификатору родительского пользователя Параметры: userID: 'id' пользователя (``str`` / ``int``) (``"me"``, для себя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Children/Children_GetChildrenByUserID """ return self.get(f"user/{self.check_user(userID)}/children", model=types.Children, is_list=True) def get_person_childrens(self, personID: str | int = "me") -> list[types.Children] | None: """[GET] person/{personID}/children Получение списка детей по идентификатору родительской персоны Параметры: personID: 'id' пользователя (``str`` / ``int``) (``"me"``, для себя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Children/Children_GetChildrenByPersonID """ return self.get(f"person/{self.check_person(personID)}/children", model=types.Children, is_list=True) def get_me_classmates(self) -> list[int] | None: """[GET] users/me/classmates Список id пользователей одноклассников текущего пользователя, если он является учеником, либо список активных участников образовательных групп пользователя во всех остальных случаях Права доступа: 
``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Classmates/Classmates_GetOwnClassmates """ return self.get("users/me/classmates", return_json=True) def get_context(self, userId: str | int = "me") -> types.Context: """[GET] users/{userId}/context | users/me/context Получение контекстной информации по пользователю Параметры: userId: id пользователя ("me" или оставьте пустым для себя) (int/str) Права доступа: ``CommonInfo``, ``FriendsAndRelatives``, ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Context """ return self.get(f"users/{userId}/context", model=types.Context) def get_user_school_memberships(self, user: int | str = "me") -> types.SchoolMemberships: """[GET] users/{user}/school-memberships Список участий в школах для произвольного пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetByUser """ return self.get(f"users/{user}/school-memberships", model=types.SchoolMemberships) def get_user_education(self, user: int | str = "me") -> types.SchoolMemberships: """[GET] users/{user}/education Список участий в школах для произвольного пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetByUser_0 """ return self.get(f"users/{self.check_user(user)}/education", model=types.SchoolMemberships) def get_person_education(self, person: int | str = "me") -> list[types.SchoolMemberships]: """[GET] persons/{person}/school-memberships Список участий в школах для произвольного пользователя Параметры: person: id персоны (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: 
https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetSchoolMembershipsByPerson """ return self.get(f"persons/{self.check_person(person)}/school-memberships", model=types.SchoolMemberships) def get_user_schools(self, user: str | int = "me") -> list[int] | None: """[GET] users/{user}/schools | users/me/schools Список идентификаторов школ произвольного/текущего пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetSchoolsByUser """ return self.get(f"users/{user}/schools", return_json=True) def get_user_eduGroups(self, user: str | int = "me") -> list[int] | None: """[GET] users/{user}/edu-groups | users/me/edu-groups Список идентификаторов классов произвольного/текущего пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetEduGroupsByUser """ return self.get(f"users/{user}/edu-groups", return_json=True) def get_eduGroup(self, eduGroup: int | str) -> types.EduGroup: """[GET] edu-groups/{eduGroup} Класс или учебная группа Параметры: eduGroup: id класса или образовательной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_Get """ return self.get(f"edu-groups/{eduGroup}", model=types.EduGroup) def get_school_eduGroups(self, school: str | int) -> list[types.EduGroup]: """[GET] schools/{school}/edu-groups Список классов в школе Параметры: school: id школы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_GetBySchool """ return 
self.get(f"schools/{school}/edu-groups", model=types.EduGroup, is_list=True) def get_person_eduGroups(self, person: str | int = "me") -> list[types.EduGroup]: """[GET] persons/{person}/edu-groups Учебные группы персоны Параметры: person: id персоны (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_GetByPerson """ return self.get(f"persons/{self.check_person(person)}/edu-groups", model=types.EduGroup, is_list=True) def get_eduGroup_persons(self, eduGroup: int | str) -> list[types.Person] | None: """[GET] edu-groups/{eduGroup}/persons Список учеников учебной группы Параметры: eduGroup: id учебной группы (int/str) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_GetGroupPersons """ return self.get(f"edu-groups/{eduGroup}/persons", model=types.Person, is_list=True) def get_parallel_eduGroups(self, groupId: int | str) -> list[types.EduGroup]: """[GET] edu-groups/{groupId}/parallel Учебные группы персоны Параметры: person: id персоны (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_ListParallelGroups """ return self.get(f"edu-groups/{groupId}/parallel", model=types.EduGroup, is_list=True) def get_homeworks_by_period(self, school: str | int, startDate: datetime | date, endDate: datetime | date) -> types.HomeWork: """[GET] users/me/school/{school}/homeworks?startDate={startDate}&endDate={endDate} Получить домашние задания пользователя за период времени Параметры: school: id школы (``int`` / ``str``) startDate: дата начало периода (``datetime.datetime`` / ``datetime.date``) endDate: дата конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Homeworks/Homeworks_ListUserHomeworksByPeriod """ 
return self.get(f"users/me/school/{school}/homeworks?startDate={self.datetime_to_string(startDate)}&endDate={self.datetime_to_string(endDate)}", model=types.HomeWork) def get_homeworks_by_Ids(self, ids: int | str | list[int | str]) -> types.HomeWork: """[GET] users/me/school/homeworks?homeworkId={ids} Получить домашние задания по идентификаторам Параметры: ids: work-id домашнего задания (допускается лист) (``int`` / ``str`` / ``list[str / int]``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Homeworks/Homeworks_GetUserHomeworkByIds """ return self.get(f"users/me/school/homeworks?homeworkId={ids if (isinstance(ids, int) or isinstance(ids, str)) else '&homeworkId='.join(ids)}", model=types.HomeWork) def get_lesson_log_entries(self, lesson: str | int) -> list[types.LessonLogEntries] | None: """[GET] lessons/{lesson}/log-entries Список отметок о посещаемости на уроке Параметры: lesson: id урока (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_GetByLesson """ return self.get(f"lessons/{lesson}/log-entries", model=types.LessonLogEntries, is_list=True) def get_person_lesson_log_entries(self, lesson: str | int, person: str | int = "me") -> types.LessonLogEntries: """[GET] lesson-log-entries/lesson/{lesson}/person/{person} Отметка о посещаемости ученика на уроке Параметры: lesson: id урока (int / str) person: id персоны ("me" или оставьте пустым для себя) (int/str) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_Get """ return self.get(f"lesson-log-entries/lesson/{lesson}/person/{self.check_person(person)}/edu-groups", model=types.LessonLogEntries) def get_eduGroup_lessons_log_entries(self, eduGroup: str | int, subject: str | int, from_: datetime | date, to: datetime | date) -> list[types.LessonLogEntries]: """[GET] 
lesson-log-entries/group/{eduGroup}?subject={subject}&from={from_}&to={to} Список отметок о посещаемости на уроках по заданному предмету в классе за интервал времени Параметры: eduGroup: id учебной группы / класса (``int`` / ``str``) subject: id предмета (``int`` / ``str``) from_: начало интервала (``datetime.datetime`` / ``datetime.date``) to: конец интервала (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_Get """ return self.get(f"lesson-log-entries/group/{eduGroup}?subject={subject}&from={self.datetime_to_string(from_)}&to={self.datetime_to_string(to)}", model=types.LessonLogEntries, is_list=True) def get_person_lessons_log_entries_by_subject(self, personID: str | int, subjectID: str | int, from_: datetime | date, to: datetime | date) -> list[types.LessonLogEntries]: """[GET] lesson-log-entries/person={personID}&subject={subjectID}&from={from}&to={to} Список отметок о посещаемости обучающегося по предмету за интервал времени Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало интервала (``datetime.datetime`` / ``datetime.date``) to: конец интервала (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_ListByPersonAndSubjectAndDateRange_0 """ return self.get(f"lesson-log-entries/person={self.check_person(personID)}&subject={subjectID}&from={self.datetime_to_string(from_)}&to={self.datetime_to_string(to)}", model=types.LessonLogEntries, is_list=True) def get_person_lessons_log_entries(self, person: str | int, from_: datetime | date, to: datetime | date) -> list[types.LessonLogEntries]: """[GET] persons/{person}/lesson-log-entries&from={from}&to={to} Список отметок о посещаемости обучающегося за интервал времени Параметры: person: id персоны (``int`` / 
``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало интервала (``datetime.datetime`` / ``datetime.date``) to: конец интервала (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_GetByPersonAndPeriod """ return self.get(f"persons/{self.check_person(person)}/lesson-log-entries&from={self.datetime_to_string(from_)}&to={self.datetime_to_string(to)}", model=types.LessonLogEntries, is_list=True) def get_lesson(self, lesson: str | int) -> types.Lesson: """[GET] lesssons/{lesson} Получить урок с заданным id Параметры: lesson: id урока (int / str) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Lessons/Lessons_Get """ return self.get(f"lessons/{lesson}", model=types.Lesson) def get_eduGroup_lesson_by_period(self, group: int | str, from_: datetime | date, to: datetime | date) -> list[types.Lesson]: """[GET] edu-groups/{group}/lessons/{from_}/{to} Уроки группы за период Параметры: group: id класса или учебной группы (``str`` / ``int``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Lessons/Lessons_GetByGroupAndPeriod """ return self.get(f"edu-groups/{group}/lessons/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Lesson, is_list=True) def get_eduGroup_lesson_by_period_and_subject(self, group: int | str, subject: int | str, from_: datetime | date, to: datetime | date) -> list[types.Lesson]: """[GET] edu-groups/{group}/subjects/{subject}/lessons/{from_}/{to} Уроки группы по предмету за период Параметры: group: id класса или учебной группы (``str`` / ``int``) subject: id предмета (``str`` / ``int``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: 
конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Lessons/Lessons_GetByGroupAndPeriodAndSubject """ return self.get(f"edu-groups/{group}/subjects/{subject}/lessons/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Lesson, is_list=True) def get_work_marks_histogram(self, workID: int | str) -> types.MarksHistogram: """[GET] works/{workID}/marks/histogram Получение деперсонализированной гистограмы оценок всего класса по идентификатору работы Параметры: workID: id работы на уроке (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkHistograms/MarkHistograms_GetMarksByWork """ return self.get(f"works/{workID}/marks/histogram", model=types.MarksHistogram) def get_marks_histogram_by_period(self, periodID: int | str, subjectID: int | str, groupID: int | str) -> types.MarksHistogramByPeriod: """[GET] periods/{periodID}/subjects/{subjectID}/groups/{groupID}/marks/histogram Получение деперсонализированной гистограмы оценок всего класса за отчетный период Параметры: periodID: id отчетного периода (``int`` / ``str``) subjectID: id предмета (``int`` / ``str``) groupID: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkHistograms/MarkHistograms_GetMarksByPeriod """ return self.get(f"periods/{periodID}/subjects/{subjectID}/groups/{groupID}/marks/histogram", model=types.MarksHistogramByPeriod) def get_mark(self, mark: int | str) -> types.Mark: """[GET] marks/{mark} Оценка Параметры: mark: id оценки (не work-id!) 
(``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_Get """ return self.get(f"marks/{mark}", model=types.Mark) def get_work_marks(self, work: int | str) -> list[types.Mark]: """[GET] works/{work}/marks Список оценок за определенную работу на уроке Параметры: work: id работы (не mark-id или lesson-id!) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByWork """ return self.get(f"works/{work}/marks", model=types.Mark, is_list=True) def get_lesson_marks(self, lesson: int | str) -> list[types.Mark]: """[GET] lessons/{lesson}/marks Оценки на уроке Параметры: lessson: id урока (не mark-id или work-id!) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByLesson """ return self.get(f"lessons/{lesson}/marks", model=types.Mark, is_list=True) def get_eduGroup_marks(self, group: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] edu-groups/{group}/marks/{from_}/{to} Оценки учебной группы за период Параметры: group: id учебной группы или класса (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByGroup """ return self.get(f"edu-groups/{group}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) def get_eduGroup_marks_by_subject(self, group: int | str, subject: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] edu-groups/{group}/subjects/{subject}/marks/{from_}/{to} Оценки учебной группы по предмету за период Параметры: group: id учебной группы или класса (``int`` / ``str``) subject: id предмета (``int`` / 
``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByGroupAndSubject """ return self.get(f"edu-groups/{group}/subjects/{subject}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) def get_person_marks_in_school(self, person: int | str, school: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/schools/{school}/marks/{from}/{to} Оценки персоны в школе за период Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) school: id школы (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetBySchoolAndPersonAndPeriod """ return self.get(f"persons/{self.check_person(person)}/schools/{school}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) def get_person_marks_in_eduGroup(self, person: int | str, group: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/edu-groups/{group}/marks/{from}/{to} Оценки персоны в учебной группе за период Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) group: id учебной группы (``int`` / ``str``) (``EduGroup``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByGroupAndPersonAndPeriod """ return 
self.get(f"persons/{self.check_person(person)}/edu-groups/{group}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) def get_person_marks_on_lesson(self, person: int | str, lesson: str | int) -> list[types.Mark]: """[GET] persons/{person}/lessons/{lesson}/marks Оценки персоны за урок Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) lesson: id урока (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByLessonAndPerson """ return self.get(f"persons/{self.check_person(person)}/lessons/{lesson}/marks", model=types.Mark, is_list=True) def get_person_marks_on_work(self, person: int | str, work: str | int) -> list[types.Mark]: """[GET] persons/{person}/lessons/{lesson}/marks Оценки персоны за работу Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) work: id работы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByWorkAndPerson """ return self.get(f"persons/{self.check_person(person)}/works/{work}/marks", model=types.Mark, is_list=True) def get_person_marks_by_subject(self, person: int | str, subject: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/subjects/{subject}/marks/{from_}/{to} Оценки персоны по предмету за период Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByPersonAndSubject """ return self.get(f"persons/{person}/subjects/{subject}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", 
model=types.Mark, is_list=True) def get_person_marks_on_lesson_by_date(self, person: int | str, date: datetime | date) -> list[types.Mark]: """[GET] lessons/{date}/persons/{person}/marks Оценки персоны по дате урока Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) date: дата урока (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByPersonAndLessonDate """ return self.get(f"lessons/{date}/persons/{self.check_person(person)}/marks", model=types.Mark, is_list=True) def get_person_marks_by_date(self, person: int | str, date: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/marks/{date} Оценки персоны по дате выставления оценки Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) date: дата выставления оценки (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByPersonAndMarkDate """ return self.get(f"persons/{self.check_person(person)}/marks/{date}", model=types.Mark, is_list=True) def get_marks_values(self) -> dict[str, list[str | None]]: """[GET] marks/values Метод возвращает все поддерживаемые системы (типы) оценок и все возможные оценки в каждой из систем.\n Например, для системы "mark5" возвращается массив из следующих оценок: "mark5" : ["1-","1","1+","2-","2","2+","3-","3","3+","4-","4","4+","5-","5","5+"] Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkValues/MarkValues_GetAll """ return self.get("marks/values", return_json=True) def get_marks_values_by_type(self, type: str) -> list[str]: """[GET] marks/values/type/{type} Метод возвращает все возможные оценки в запрашиваемой системе (типе) оценок.\n Чтобы узнать, какие типы поддерживаются нужно предварительно делать запрос marks/values без параметров.\n Например, для запроса marks/values/type/mark5 
ответом будет list["1-", "1", "1+", "2-", "2", "2+", "3-", "3", "3+", "4-", "4", "4+", "5-", "5", "5+"]. Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkValues/MarkValues_GetByType """ return self.get(f"marks/values/type/{type}", return_json=True) def get_recent_marks(self, person: str | int, group: int | str, fromDate: datetime | date = None, subject: int | str = None, limit: int = 10) -> types.RecentMarks: """[GET] persons/{person}/group/{group}/recentmarks Последние оценки/отметки посещаемости персоны по предмету, указанному в параметре subject, начиная с даты определенном в параметре fromDate, и с ограничением на выводимое количество указанном в параметре limit Параметры: ``person``: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) ``group``: id класса или учебной группы (``int`` / ``str``) ``*OPTIONAL*``: ``fromDate``: (``datetime.datetime`` / ``datetime.date``) Дата и время, начиная от которого будут выводится оценки/отметки посещаемости. Если не указанно, то результат будет выводится с сегодняшнего дня включительно. Параметр применим для постраничного вывода оценок/отметок посещаемости по конкретному предмету ``subject``: (``int`` / ``str``) id предмета. Если не задан, то результат будет включать в себя оценки/отметки посещаемости по всем предметам, но по каждому предмету будет накладываться ограничение указанном в параметре limit ``limit``: (``int`` = 10) Количество оценок по предмету. Если не задан, то будет применено ограничение по умолчанию, равное 10. Значение должно быть задано в интервале от 1 до 100. """ params = {"limit": str(limit)} if fromDate: params["fromDate"] = self.datetime_to_string(fromDate) if subject: params["subject"] = str(subject) return self.get(f"persons/{self.check_person(person)}/group/{group}/recentmarks", params=params, model=types.RecentMarks) def get_task(self, task: str | int) -> types.Task: """[GET] tasks/{task} Домашнее задание Параметры: task: task-id домашнего задания (не work-id!) 
(``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_Get """ return self.get(f"tasks/{task}", model=types.Task) def get_lesson_tasks(self, lesson: str | int) -> list[types.Task]: """[GET] lessons/{lesson}/tasks Список Домашних заданий на урок Параметры: lesson: id урока (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_GetByLesson """ return self.get(f"lessons/{lesson}/tasks", model=types.Task, is_list=True) def get_work_tasks(self, work: str | int, persons: str | int | list[int | str]) -> list[types.Task]: """[GET] works/{work}/tasks Список Домашних заданий Параметры: work: id работы (homework) (``str`` / ``int``) persons: id (одно или несколько, обернутых в список) персоны (``int`` / ``str`` / ``list[str | int]``) (``"me"``, для текущего пользователя (можно и в списке указать)) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_GetByWork """ return self.get(f"works/{work}/tasks{'?persons={}'.format(self.check_person(persons) if (isinstance(persons, int) or isinstance(persons, str)) else '&persons='.join([self.check_person(i) for i in persons]))}", model=types.Task, is_list=True) def get_undone_person_tasks(self, personId: str | int = "me") -> list[types.Task]: """[GET] persons/{personId}/undone Список невыполненных Домашних заданий обучающегося с истекшим сроком выполнения Параметры: personId: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_ListNotCompletedByPersonId """ return self.get(f"persons/{self.check_person(personId)}/undone", model=types.Task, is_list=True) def get_person_tasks(self, person: str | int, subject: int | str, from_: datetime | date, to: datetime | date, pageNumber: int = None, pageSize: int = None) -> 
list[types.Task]: """[GET] persons{person}/tasks Список Домашних заданий ученика по предмету Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало интервала дат (``datetime.datetime``) to: конец интервала дат (``datetime.datetime``) pageNumber: номер страницы (``int``) (``*optional*``) pageSize: размер страницы (``int``) (``*optional*``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_GetByPersonAndSubject """ params = { "subject": str(subject), "from": self.datetime_to_string(from_), "to": self.datetime_to_string(to), } if pageNumber: params["pageNumber"] = str(pageNumber) if pageSize: params["pageSize"] = str(pageSize) return self.get(f"persons/{self.check_person(person)}/tasks", model=types.Task, is_list=True, params=params) def get_eduGroup_subjects(self, eduGroup: int | str) -> list[types.Subject]: """[GET] edu-groups/{eduGroup}/subjects Список предметов, преподаваемых в классе в текущем отчетном периоде Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Subjects/Subjects_GetByEduGroup """ return self.get(f"edu-groups/{eduGroup}/subjects", model=types.Subject, is_list=True) def get_school_subjects(self, school: int | str) -> list[types.Subject]: """[GET] schools/{school}/subjects Список предметов, преподаваемых в образовательной организации в текущем учебном году Параметры: school: id школы (``int`` / ``str``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Subjects/Subjects_GetSchoolSubjects """ return self.get(f"schools/{school}/subjects", model=types.Subject, is_list=True) def get_school_parameters(self, school: int | str) -> types.SchoolParameters: """[GET] schools/{school}/parameters Параметры общеобразовательной организации Параметры: school: id школы (``int`` / ``str``) Docs: 
https://api.school.mosreg.ru/partners/swagger/ui/index#!/SchoolsParameters/SchoolsParameters_Get """ return self.get(f"schools/{school}/parameters", model=types.SchoolParameters) def get_school(self, school: int | str) -> types.School: """[GET] schools/{school} Профиль школы Параметры: school: id школы (``int`` / ``str``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schools/Schools_Get """ return self.get(f"schools/{school}", model=types.School) def get_school_membership(self, school: int | str, schoolMembershipType: str = "Staff") -> list[types.Person]: """[GET] schools/{school}/membership Список профилей пользователей школы Параметры: school: id школы (``int`` / ``str``) schoolMembershipType: тип запрашиваемых пользователей (``"Staff" / "Admins"``) (``str``). По умолчанию стоит ``"Staff"`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schools/Schools_GetSchoolMembership """ return self.get(f"schools/{school}/membership?schoolMembershipType={schoolMembershipType}", model=types.Person, is_list=True) def get_person_schools(self, excludeOrganizations: bool = "") -> list[types.School]: """[GET] schools/person-schools Список образовательных организаций текущего пользователя Параметры: excludeOrganizations: - (``bool``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schools/Schools_GetPersonSchools """ return self.get("schools/person-schools" + ("" if excludeOrganizations == "" else "?excludeOrganizations={}".format('true' if excludeOrganizations else 'false')), model=types.School, is_list=True) def get_person_schedules(self, person: int | str, group: int | str, startDate: datetime | date, endDate: datetime | date) -> types.Schedule: """[GET] persons/{person}/groups/{group}/schedules Расписание ученика Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) group: id учебной группы или класса (``int`` / ``str``) (``EduGroupID``) startDate: дата начала периода (``datetime.datetime``) 
endDate: дата завершения периода (``datetime.datetime``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schedules/Schedules_GetByPersonAndPeriod """ return self.get(f"persons/{self.check_person(person)}/groups/{group}/schedules?startDate={self.datetime_to_string(startDate)}&endDate={self.datetime_to_string(endDate)}", model=types.Schedule) def get_eduGroup_reporting_periods(self, eduGroup: int | str) -> list[types.ReportingPeriod]: """[GET] edu-groups/{eduGroup}/reporting-periods Список отчётных периодов для класса или учебной группы Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/ReportingPeriods/ReportingPeriods_GetByEduGroup """ return self.get(f"edu-groups/{eduGroup}/reporting-periods", model=types.ReportingPeriod, is_list=True) def get_eduGroup_reporting_periods_all(self, eduGroup: int | str) -> types.ReportingPeriodEduGroup: """[GET] edu-groups/{eduGroup}/reporting-periods-group Группа отчётных периодов для класса или учебной группы Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/ReportingPeriods/ReportingPeriods_GetGroupReportingPeriodsGroup """ return self.get(f"edu-groups/{eduGroup}/reporting-periods-group", model=types.ReportingPeriodEduGroup) def get_person(self, person: int | str = "me") -> types.Person: """[GET] persons/{person} Профиль персоны Параметры: person: id персоны (``"me"``, или пусто для текущего пользователя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Persons/Persons_Get """ return self.get(f"persons/{self.check_person(person)}", model=types.Person) def get_eduGroup_students(self, eduGroup: int | str) -> list[types.Person]: """[GET] edu-groups/{eduGroup}/students Список учеников в классе или учебной 
группе Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Persons/Persons_GetByEduGroup_0 """ return self.get(f"edu-groups/{eduGroup}/students", model=types.Person, is_list=True) def search_person( self, lastName: Optional[str] = None, firstName: Optional[str] = None, middleName: Optional[str] = None, snils: Optional[str] = None, birthday: Optional[date] = None, ) -> None | list[types.Person]: """[GET] person/search Поиск персоны Параметры: lastName: Фамилия (``str``, ``*optional*``) firstName: Имя (``str``, ``*optional*``) middleName: Отчество (``str``, ``*optional*``) snils: СНИЛС (``str``, ``*optional*``) birthday: ДАТА РОЖДЕНИЯ (``datetime.date``, ``*optional*``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Persons/Persons_Search """ params = {} if lastName: params["lastName"] = lastName if firstName: params["firstName"] = firstName if middleName: params["middleName"] = middleName if snils: params["snils"] = snils if birthday: params["birthday"] = self.date_to_string(birthday) return self.get("person/search", params=params, model=types.Person, is_list=True) def get_eduGroup_teachers(self, group: int | str) -> list[types.EduGroupTeacher]: """[GET] edu-groups/{group}/teachers Список учителей, которые ведут уроки в данной группе, учитываются уроки от недели назад и на 30 дней вперед Параметры: group: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Teacher/Teacher_GetEduGroupTeachers """ return self.get(f'edu-groups/{group}/teachers', model=types.EduGroupTeacher, is_list=True) def get_school_teachers(self, school: int | str) -> list[types.SchoolTeacher]: """[GET] teacher/{teacher}/students Список преподавателей в выбранной образовательной организации Параметры: teacher: person-id учителя (``int`` / 
``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Teacher/Teacher_GetSchoolTeachers """ return self.get(f'schools/{school}/teachers', model=types.SchoolTeacher, is_list=True) def get_teacher_students(self, teacher: int | str) -> list[types.TeacherStudent]: """[GET] teacher/{teacher}/students Список учеников для учителя который ведет уроки у этих учеников(они должны быть в расписании) от недели назад и на 30 дней вперед Параметры: teacher: person-id учителя (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Teacher/Teacher_GetStudentsByTeacher """ return self.get(f'teacher/{teacher}/students', model=types.TeacherStudent, is_list=True) def get_eduGroup_timetable(self, eduGroup: int | str) -> types.TimeTable: """[GET] edu-groups/{eduGroup}/timetables Получение расписания учебной группы Параметры: eduGroup: id класса или учбеной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Timetables/Timetables_GetByEduGroup """ return self.get(f'edu-groups/{eduGroup}/timetables', model=types.TimeTable) def get_school_timetable(self, school: int | str) -> types.TimeTable: """[GET] schools/{school}/timetables Получение расписания школы Параметры: school: id школы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Timetables/Timetables_GetBySchool """ return self.get(f'schools/{school}/timetables', model=types.TimeTable) def get_user_feed(self, date: datetime | date, childPersonId: int | str = None, limit: int | str = None) -> types.UserFeed: """[GET] users/me/feed Лента пользователя Параметры: date: Дата начала временного интервала (``datetime.datetime``) childPersonId: id персоны ребёнка (``int`` | ``str``) (``optional``) limit: Ограничение временного интервала в днях (``int``) (``optional``) Права доступа: 
``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserFeeds/UserFeeds_GetUserFeed """ params = {"date": self.datetime_to_string(date)} if childPersonId: params["childPersonId"] = childPersonId if limit: params["limit"] = limit return self.get('users/me/feed', model=types.UserFeed, params=params) def get_my_children_relatives(self) -> list[types.UserRelatives | None] | None: """[GET] users/me/childrenrelatives Список id всех родственных связей детей произвольного пользователя Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserRelatives/UserRelatives_GetOwnChildrenRelatives """ return self.get(f"users/me/childrenrelatives", model=types.UserRelatives, is_list=True) def get_my_childrens(self) -> list[int | None] | None: """[GET] users/me/children Список id пользователей детей текущего пользователя Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserRelatives/UserRelatives_GetOwnChildren """ return self.get(f"users/me/children", return_json=True) def get_user_relatives(self, user: str | int = "me") -> types.UserRelatives: """[GET] users/{user}/relatives | users/me/relatives Получение всех родственных связей произвольного/текущего пользователя. 
Параметры: user: id пользователя (``int`` / ``str``) (``"me"``, для текущего пользователя) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserRelatives/UserRelatives_GetRelatives """ return self.get(f"users/{user}/relatives", model=types.UserRelatives) def get_user(self, user: str | int = "me") -> types.User: """[GET] users/{user} | users/me Профиль текущего пользователя (или по ID) Параметры: user: id пользователя (``int`` / ``str``) (``"me"``, для текущего пользователя) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Users/Users_Get """ return self.get(f"users/{user}", model=types.User) def get_user_roles(self, user: str | int = "me") -> list[int | None] | None: """[GET] users/{user}/roles | users/me/roles Профиль текущего пользователя (или по ID) Параметры: user: id пользователя (``int`` / ``str``) (``"me"``, для текущего пользователя) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Users/Users_Get """ return self.get(f"users/{user}/roles", return_json=True) def get_weighted_average_marks(self, group: int | str, from_: datetime | date, to: datetime | date) -> types.WeightedAverageMarks: """[GET] edu-groups/{group}/wa-marks/{from_}/{to} Получить взвешенные оценки за период. 
Параметры: group: id класса или учебной группы (``int`` / ``str``) (``EduGroup``) from_: начало периода (``datetime.datetime``) to: конец периода (``datetime.datetime``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/WeightedAverageMarks/WeightedAverageMarks_GetGroupAverageMarks """ return self.get(f'edu-groups/{group}/wa-marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}', model=types.WeightedAverageMarks) def get_lesson_works(self, lesson: str | int) -> types.Work: """[GET] lessons/{lesson}/works Список работ на уроке Параметры: lesson: id урока (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Works/Works_GetByLesson_0 """ return self.get(f'lessons/{lesson}/works', model=types.Work, is_list=True) def get_work(self, work: str | int) -> types.Work: """[GET] works/{work} Работа на уроке по ID Параметры: work: id работы (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Works/Works_Get """ return self.get(f'works/{work}', model=types.Work) def edit_homework_status(self, work: int | str, person: str | int = "me", change: dict[str, str] = {"action": "StartWorking"}): """[POST] works/{work}/persons/{person}/status Изменить статус выполнения домашней работы учащимся. 
Параметры: work: id урока (``int`` / ``str``) person: id персоны (``int`` / ``str``) (``"me"``, для себя) change: статус (``dict[str, str]``) : Пример -> ``{"action": "StartWorking"}`` Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Works/Works_ChangeStatus """ return self.post(f'works/{work}/persons/{self.check_person(person)}/status', return_json=True, data=change) def get_school_work_types(self, school: str | int) -> list[types.WorkType]: """[GET] work-types/{school} Получение списка всех типов работ школы Параметры: school: id школы (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/WorkTypes/WorkTypes_Get """ return self.get(f'work-types/{school}', model=types.WorkType, is_list=True)
/school_mosreg_api-0.9.4.tar.gz/school_mosreg_api-0.9.4/school_mosreg_api/api/methods.py
0.700792
0.255471
methods.py
pypi
from .model import Type from datetime import datetime from .Works import Work class ImportantWork(Type): work: Work subjectName: str workTypeName: str class Mark_(Type): id: int id_str: str type: str value: str textValue: str | None = None person: int person_str: str | None = None work: int work_str: str lesson: int | None = None lesson_str: str | None = None number: int | None = None date: datetime | None = None workType: int mood: str | None = None use_avg_calc: bool | None = None class MarksList(Type): marks: list[Mark_] class Mark(Type): mark: Mark_ class Lesson(Type): id: int date: datetime class MarksCard(Type): marks: list[Mark] lesson: Lesson | None = None isImportant: bool | None = None subjectName: str subjectId: int workTypeName: str class Summary(Type): importantWorks: list[ImportantWork] = [] marksCards: list[MarksCard] = [] dayEmotion: str feedMode: str class NextDayHomeworks(Type): work: Work subjectName: str workTypeName: str class ImportantWorkType(Type): id: int name: str class NextDaySchedule(Type): lessonId: int lessonStatus: str number: int subjectName: str importantWorkTypes: list[ImportantWorkType] class TodayHomework(Type): work: Work subjectName: str workTypeName: str class TodaySchedule(Type): lessonId: int | None = None lessonStatus: str | None = None number: int | None = None subjectName: str importantWorkTypes: list[ImportantWorkType] = [] class LessonLogEntry(Type): person: int lesson: int | None = None person_str: str lesson_str: str | None = None comment: str | None = None status: str | None = None createdDate: datetime | None = None class LogEntries(Type): lessonLogEntry: LessonLogEntry | None = None subjectName: str lessonTitle: str class FeedDay(Type): """...""" date: datetime nextWorkingDayDate: datetime | None = None summary: Summary | None = None nextDayHomeworks: list[NextDayHomeworks] = [] nextDaySchedule: list[NextDaySchedule] = [] todayHomeworks: list[TodayHomework] = [] todaySchedule: list[TodaySchedule] = [] logEntries: 
list[LogEntries] = [] class UserFeed(Type): """[GET] /v2.0/users/me/feed\n~~~\nЛента пользователя\n~~~\nПрава доступа: EducationalInfo\n~~~""" days: list[FeedDay] | None = None
/school_mosreg_api-0.9.4.tar.gz/school_mosreg_api-0.9.4/school_mosreg_api/types/UserFeeds.py
0.524882
0.267001
UserFeeds.py
pypi
from .model import Type class HobbyGroup(Type): schoolId: int schoolId_str: str hobbyGroupName: str hobbyGroupDescription: str hobbyGroupSchedule: str hobbyGroupPayment: str | None | None = None hobbyGroupEnrollment: str class LearningResult(Type): schoolId: int schoolId_str: str studyYear: int prizesAtAllRussiaOlympiadPercentage: int | None = None basicCertificatesReceivedPercentage: int | None = None certificatesReceivedPercentage: int | None = None egeMathBaseLevelAverageScore: int | None = None egeMathProfileLevelAverageScore: int | None = None egeRussianAverageScore: int | None = None giaMathPassedPercentage: int | None = None giaMathPassedPerfectlyPercentage: int | None = None giaRussianPassedPercentage: int | None = None giaRussianPassedPerfectlyPercentage: int | None = None medalistsCount: int | None = None medalistsPercentage: int | None = None enrolledPercentage: int | None = None rankedInTheTop100: bool | None = None class SchoolParameters(Type): """[GET] /v2.0/schools/{school}/parameters\n~~~\nПараметры общеобразовательных организаций\n~~~""" schoolId: int schoolId_str: str municipality: str fullName: str shortName: str status: str foundationYear: int hasspecialty: bool specialty: str address: list[str] website: str email: str schoolPhone: str schoolDaysCount: int workHours: str shiftsCount: int eduOrgForm: str directorFullName: str directorPhone: str directorQualification: str directorPhoto: str licence: str accreditation: str charter: str cooperationName: str cooperationWebsite: str prescriptions: str activeJournalUrl: str inn: str facadePhoto: str cantinaPhoto: str gymPhoto: str classroomPhoto: str physicsroomPhoto: str chemicsryroomPhoto: str biologyroomPhoto: str mathroomPhoto: str russianroomPhoto: str musicroomPhoto: str educationalLevels: list[str] generalEducationalProgrammsLicense: str furtherEducationalProgramsLicense: str primarySchoolEducationalPrograms: list[str] hasAdaptedEducationalPrograms: bool | None = None hasOwnEducationalPrograms: 
bool | None = None ownEducationalPrograms: str hasDistEduTech: bool | None = None teachingForeignLanguages: list[str] studyPlan: str actualOccupancy: int | None = None designCapacity: int | None = None enrollmentIsOpen: bool | None = None vacantPlacesCount: int | None = None disabledChildrenCount: int | None = None firstStageGroupsCount: int | None = None secondStageGroupsCount: int | None = None thirdStageGroupsCount: int | None = None firstStageStudentsCount: int | None = None secondStageStudentsCount: int | None = None thirdStageStudentsCount: int | None = None teachersCount: int | None = None teachersWithHigherEducationCount: int | None = None highestCategoryTeachersCount: int | None = None publications: str laureatesOfContestsCount: str laureatesOfContestsInfo: str hasEducationalPsychologists: bool | None = None hasDefectologists: bool | None = None hasSpeechTherapists: bool | None = None hasSocialWorkers: bool | None = None hasMedicalWorkers: bool | None = None studentsPerComputer: int | None = None hasCompensatingClasses: bool | None = None hasBarrierFreeEnvironment: bool | None = None hasSwimmingPool: bool | None = None hasMedicalOffice: bool | None = None hasDiningRoom: bool | None = None hasGym: bool | None = None gymCount: int | None = None hasAssemblyHall: bool | None = None hasWinterGarden: bool | None = None hasCctv: bool | None = None hasGpd: bool | None = None schoolSiteArea: int | None = None hasEstheticZone: bool | None = None trainingExperimentalPlotArea: int | None = None hasSportArea: bool | None = None hasRecreationArea: bool | None = None hasZonesForTrainingInPreventionOfChildRoadTrafficInjuries: bool | None = None hasAreasOfEconomicPurpose: bool | None = None hobbyGroups: list[HobbyGroup] learningResults: list[LearningResult]
/school_mosreg_api-0.9.4.tar.gz/school_mosreg_api-0.9.4/school_mosreg_api/types/SchoolsParameters.py
0.64512
0.346293
SchoolsParameters.py
pypi
import asyncio from datetime import date, datetime from typing import Any from .base import AsyncBaseAPI from .. import types class AsyncSchoolMosregRUAPI(AsyncBaseAPI): """Основной Async класс почти со всеми функциями API.\n~~~""" async def check_person(self, value): if value == "me": return (await self.get_user()).personId else: return value async def check_user(self, value): if value == "me": return (await self.get_user()).id else: return value async def get_me_organizations(self) -> list[int] | None: """[GET] users/me/organizations Список идентификаторов организаций текущего пользователя Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Authorities/Authorities_GetOwnOrganizations """ return await self.get("users/me/organizations", return_json=True) async def get_organization(self, organizationId: int | str) -> types.Organization: """[GET] users/me/organizations/{organizationId} Данные указанной организации пользователя. Параметры: organizationId: Идентификатор организации (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Authorities/Authorities_GetOrganizationInfo """ return await self.get(f"users/me/organizations/{organizationId}", model=types.Organization) async def get_token_with_code(self, code: str, client_id: str, client_secret: str, grant_type: str, refreshToken: str) -> types.TokenWithCode: """[POST] authorizations Обменять код доступа на токен Параметры: code: ``str`` client_id: ``str`` client_secret: ``str`` grant_type: ``str`` refreshToken: ``str`` Параметры запроса [POST] method="authorizations": data: Код доступа -> Пример: ``{`` ``"code": "string",`` ``"client_id": "00000000-0000-0000-0000-000000000000",`` ``"client_secret": "00000000-0000-0000-0000-000000000000",`` ``"grant_type": "NotSet",`` ``"refreshToken": "string"`` ``}`` Docs: 
https://api.school.mosreg.ru/partners/swagger/ui/index#!/Authorizations/Authorizations_PostTokenRequestCode """ return await self.post("authorizations", model=types.TokenWithCode, data={ "code": code, "client_id": client_id, "client_secret": client_secret, "grant_type": grant_type, "refreshToken": refreshToken}) async def get_person_avg_marks(self, person: int | str, period) -> str | None: """[GET] persons/{person}/reporting-periods/{period}/avg-mark Оценка персоны за отчетный период Параметры: person: id персоны (``"me"``, для себя) period: id отчетного периода Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_GetByPersonAndPeriod """ return await self.get(f"persons/{await self.check_person(person)}/reporting-periods/{period}/avg-mark", return_json=True) async def get_person_avg_marks_by_subject(self, person: int | str, period, subject) -> str | None: """[GET] persons/{person}/reporting-periods/{period}/subjects/{subject}/avg-mark Оценка персоны по предмету за отчетный период Параметры: person: id персоны (``"me"``, для себя) period: id отчетного периода subject: id предмета Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_GetByPersonAndPeriodAndSubject """ return await self.get(f"persons/{await self.check_person(person)}/reporting-periods/{period}/subjects/{subject}/avg-mark", return_json=True) async def get_eduGroup_avg_marks_to_date(self, group: int | str, period: int | str, date: datetime | date) -> list[dict[str, Any]] | None: """[GET] edu-groups/{group}/reporting-periods/{period}/avg-marks/{date} Оценки учебной группы по предмету за отчетный период до определенной даты Параметры: group: id учебной группы (``EduGroup``) (``int`` / ``str``) period: id отчетного периода (``int`` / ``str``) date: дата (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: 
https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_GetByGroupAndPeriodOnDate """ return await self.get(f"edu-groups/{group}/reporting-periods/{period}/avg-marks/{self.datetime_to_string(date)}", return_json=True) async def get_eduGroup_avg_marks(self, group: int | str, from_: datetime | date, to: datetime | date) -> list[dict[str, Any]] | None: """[GET] edu-groups/{group}/avg-marks/{from}/{to} Оценки учебной группы за период Параметры: group: id учебной группы (``EduGroup``) (``int`` / ``str``) from_: id начало периода (``datetime.datetime`` / ``datetime.date``) to: id конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/AverageMarks/AverageMarks_ListByGroupAndDates """ return await self.get(f"edu-groups/{group}/avg-marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", return_json=True) async def get_user_childrens(self, userID: str | int = "me") -> list[types.Children] | None: """[GET] user/{userID}/children Получение списка детей по идентификатору родительского пользователя Параметры: userID: 'id' пользователя (``str`` / ``int``) (``"me"``, для себя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Children/Children_GetChildrenByUserID """ return await self.get(f"user/{await self.check_user(userID)}/children", model=types.Children, is_list=True) async def get_person_childrens(self, personID: str | int = "me") -> list[types.Children] | None: """[GET] person/{personID}/children Получение списка детей по идентификатору родительской персоны Параметры: personID: 'id' пользователя (``str`` / ``int``) (``"me"``, для себя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Children/Children_GetChildrenByPersonID """ return await self.get(f"person/{await self.check_person(personID)}/children", model=types.Children, is_list=True) 
async def get_me_classmates(self) -> list[int] | None: """[GET] users/me/classmates Список id пользователей одноклассников текущего пользователя, если он является учеником, либо список активных участников образовательных групп пользователя во всех остальных случаях Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Classmates/Classmates_GetOwnClassmates """ return await self.get("users/me/classmates", return_json=True) async def get_context(self, userId: str | int = "me") -> types.Context: """[GET] users/{userId}/context | users/me/context Получение контекстной информации по пользователю Параметры: userId: id пользователя ("me" или оставьте пустым для себя) (int/str) Права доступа: ``CommonInfo``, ``FriendsAndRelatives``, ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Context """ return await self.get(f"users/{userId}/context", model=types.Context) async def get_user_school_memberships(self, user: int | str = "me") -> types.SchoolMemberships: """[GET] users/{user}/school-memberships Список участий в школах для произвольного пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetByUser """ return await self.get(f"users/{user}/school-memberships", model=types.SchoolMemberships) async def get_user_education(self, user: int | str = "me") -> types.SchoolMemberships: """[GET] users/{user}/education Список участий в школах для произвольного пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetByUser_0 """ return await self.get(f"users/{await self.check_user(user)}/education", model=types.SchoolMemberships) 
async def get_person_education(self, person: int | str = "me") -> list[types.SchoolMemberships]: """[GET] persons/{person}/school-memberships Список участий в школах для произвольного пользователя Параметры: person: id персоны (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetSchoolMembershipsByPerson """ return await self.get(f"persons/{await self.check_person(person)}/school-memberships", model=types.SchoolMemberships) async def get_user_schools(self, user: str | int = "me") -> list[int] | None: """[GET] users/{user}/schools | users/me/schools Список идентификаторов школ произвольного/текущего пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetSchoolsByUser """ return await self.get(f"users/{user}/schools", return_json=True) async def get_user_eduGroups(self, user: str | int = "me") -> list[int] | None: """[GET] users/{user}/edu-groups | users/me/edu-groups Список идентификаторов классов произвольного/текущего пользователя Параметры: user: id пользователя (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EducationMemberships/EducationMemberships_GetEduGroupsByUser """ return await self.get(f"users/{user}/edu-groups", return_json=True) async def get_eduGroup(self, eduGroup: int | str) -> types.EduGroup: """[GET] edu-groups/{eduGroup} Класс или учебная группа Параметры: eduGroup: id класса или образовательной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_Get """ return await self.get(f"edu-groups/{eduGroup}", 
model=types.EduGroup) async def get_school_eduGroups(self, school: str | int) -> list[types.EduGroup]: """[GET] schools/{school}/edu-groups Список классов в школе Параметры: school: id школы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_GetBySchool """ return await self.get(f"schools/{school}/edu-groups", model=types.EduGroup, is_list=True) async def get_person_eduGroups(self, person: str | int = "me") -> list[types.EduGroup]: """[GET] persons/{person}/edu-groups Учебные группы персоны Параметры: person: id персоны (``"me"`` или оставьте пустым для себя) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_GetByPerson """ return await self.get(f"persons/{await self.check_person(person)}/edu-groups", model=types.EduGroup, is_list=True) async def get_eduGroup_persons(self, eduGroup: int | str) -> list[types.Person] | None: """[GET] edu-groups/{eduGroup}/persons Список учеников учебной группы Параметры: eduGroup: id учебной группы (int/str) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_GetGroupPersons """ return await self.get(f"edu-groups/{eduGroup}/persons", model=types.Person, is_list=True) async def get_parallel_eduGroups(self, groupId: int | str) -> list[types.EduGroup]: """[GET] edu-groups/{groupId}/parallel Учебные группы персоны Параметры: person: id персоны (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/EduGroups/EduGroups_ListParallelGroups """ return await self.get(f"edu-groups/{groupId}/parallel", model=types.EduGroup, is_list=True) async def get_homeworks_by_period(self, school: str | int, startDate: datetime | date, endDate: datetime | date) -> types.HomeWork: """[GET] 
users/me/school/{school}/homeworks?startDate={startDate}&endDate={endDate} Получить домашние задания пользователя за период времени Параметры: school: id школы (``int`` / ``str``) startDate: дата начало периода (``datetime.datetime`` / ``datetime.date``) endDate: дата конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Homeworks/Homeworks_ListUserHomeworksByPeriod """ return await self.get(f"users/me/school/{school}/homeworks?startDate={self.datetime_to_string(startDate)}&endDate={self.datetime_to_string(endDate)}", model=types.HomeWork) async def get_homeworks_by_Ids(self, ids: int | str | list[int | str]) -> types.HomeWork: """[GET] users/me/school/homeworks?homeworkId={ids} Получить домашние задания по идентификаторам Параметры: ids: work-id домашнего задания (допускается лист) (``int`` / ``str`` / ``list[str / int]``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Homeworks/Homeworks_GetUserHomeworkByIds """ return await self.get(f"users/me/school/homeworks?homeworkId={ids if (isinstance(ids, int) or isinstance(ids, str)) else '&homeworkId='.join(ids)}", model=types.HomeWork) async def get_lesson_log_entries(self, lesson: str | int) -> list[types.LessonLogEntries] | None: """[GET] lessons/{lesson}/log-entries Список отметок о посещаемости на уроке Параметры: lesson: id урока (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_GetByLesson """ return await self.get(f"lessons/{lesson}/log-entries", model=types.LessonLogEntries, is_list=True) async def get_person_lesson_log_entries(self, lesson: str | int, person: str | int = "me") -> types.LessonLogEntries: """[GET] lesson-log-entries/lesson/{lesson}/person/{person} Отметка о посещаемости ученика на уроке Параметры: lesson: id урока (int / str) person: id персоны ("me" или 
оставьте пустым для себя) (int/str) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_Get """ return await self.get(f"lesson-log-entries/lesson/{lesson}/person/{await self.check_person(person)}/edu-groups", model=types.LessonLogEntries) async def get_eduGroup_lessons_log_entries(self, eduGroup: str | int, subject: str | int, from_: datetime | date, to: datetime | date) -> list[types.LessonLogEntries]: """[GET] lesson-log-entries/group/{eduGroup}?subject={subject}&from={from_}&to={to} Список отметок о посещаемости на уроках по заданному предмету в классе за интервал времени Параметры: eduGroup: id учебной группы / класса (``int`` / ``str``) subject: id предмета (``int`` / ``str``) from_: начало интервала (``datetime.datetime`` / ``datetime.date``) to: конец интервала (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_Get """ return await self.get(f"lesson-log-entries/group/{eduGroup}?subject={subject}&from={self.datetime_to_string(from_)}&to={self.datetime_to_string(to)}", model=types.LessonLogEntries, is_list=True) async def get_person_lessons_log_entries_by_subject(self, personID: str | int, subjectID: str | int, from_: datetime | date, to: datetime | date) -> list[types.LessonLogEntries]: """[GET] lesson-log-entries/person={personID}&subject={subjectID}&from={from}&to={to} Список отметок о посещаемости обучающегося по предмету за интервал времени Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало интервала (``datetime.datetime`` / ``datetime.date``) to: конец интервала (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_ListByPersonAndSubjectAndDateRange_0 """ return await 
self.get(f"lesson-log-entries/person={await self.check_person(personID)}&subject={subjectID}&from={self.datetime_to_string(from_)}&to={self.datetime_to_string(to)}", model=types.LessonLogEntries, is_list=True) async def get_person_lessons_log_entries(self, person: str | int, from_: datetime | date, to: datetime | date) -> list[types.LessonLogEntries]: """[GET] persons/{person}/lesson-log-entries&from={from}&to={to} Список отметок о посещаемости обучающегося за интервал времени Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало интервала (``datetime.datetime`` / ``datetime.date``) to: конец интервала (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/LessonLog/LessonLog_GetByPersonAndPeriod """ return await self.get(f"persons/{await self.check_person(person)}/lesson-log-entries&from={self.datetime_to_string(from_)}&to={self.datetime_to_string(to)}", model=types.LessonLogEntries, is_list=True) async def get_lesson(self, lesson: str | int) -> types.Lesson: """[GET] lesssons/{lesson} Получить урок с заданным id Параметры: lesson: id урока (int / str) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Lessons/Lessons_Get """ return await self.get(f"lessons/{lesson}", model=types.Lesson) async def get_eduGroup_lesson_by_period(self, group: int | str, from_: datetime | date, to: datetime | date) -> list[types.Lesson]: """[GET] edu-groups/{group}/lessons/{from_}/{to} Уроки группы за период Параметры: group: id класса или учебной группы (``str`` / ``int``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Lessons/Lessons_GetByGroupAndPeriod """ return await 
self.get(f"edu-groups/{group}/lessons/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Lesson, is_list=True) async def get_eduGroup_lesson_by_period_and_subject(self, group: int | str, subject: int | str, from_: datetime | date, to: datetime | date) -> list[types.Lesson]: """[GET] edu-groups/{group}/subjects/{subject}/lessons/{from_}/{to} Уроки группы по предмету за период Параметры: group: id класса или учебной группы (``str`` / ``int``) subject: id предмета (``str`` / ``int``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Lessons/Lessons_GetByGroupAndPeriodAndSubject """ return await self.get(f"edu-groups/{group}/subjects/{subject}/lessons/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Lesson, is_list=True) async def get_work_marks_histogram(self, workID: int | str) -> types.MarksHistogram: """[GET] works/{workID}/marks/histogram Получение деперсонализированной гистограмы оценок всего класса по идентификатору работы Параметры: workID: id работы на уроке (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkHistograms/MarkHistograms_GetMarksByWork """ return await self.get(f"works/{workID}/marks/histogram", model=types.MarksHistogram) async def get_marks_histogram_by_period(self, periodID: int | str, subjectID: int | str, groupID: int | str) -> types.MarksHistogramByPeriod: """[GET] periods/{periodID}/subjects/{subjectID}/groups/{groupID}/marks/histogram Получение деперсонализированной гистограмы оценок всего класса за отчетный период Параметры: periodID: id отчетного периода (``int`` / ``str``) subjectID: id предмета (``int`` / ``str``) groupID: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: 
https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkHistograms/MarkHistograms_GetMarksByPeriod """ return await self.get(f"periods/{periodID}/subjects/{subjectID}/groups/{groupID}/marks/histogram", model=types.MarksHistogramByPeriod) async def get_mark(self, mark: int | str) -> types.Mark: """[GET] marks/{mark} Оценка Параметры: mark: id оценки (не work-id!) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_Get """ return await self.get(f"marks/{mark}", model=types.Mark) async def get_work_marks(self, work: int | str) -> list[types.Mark]: """[GET] works/{work}/marks Список оценок за определенную работу на уроке Параметры: work: id работы (не mark-id или lesson-id!) (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByWork """ return await self.get(f"works/{work}/marks", model=types.Mark, is_list=True) async def get_lesson_marks(self, lesson: int | str) -> list[types.Mark]: """[GET] lessons/{lesson}/marks Оценки на уроке Параметры: lessson: id урока (не mark-id или work-id!) 
(``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByLesson """ return await self.get(f"lessons/{lesson}/marks", model=types.Mark, is_list=True) async def get_eduGroup_marks(self, group: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] edu-groups/{group}/marks/{from_}/{to} Оценки учебной группы за период Параметры: group: id учебной группы или класса (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByGroup """ return await self.get(f"edu-groups/{group}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) async def get_eduGroup_marks_by_subject(self, group: int | str, subject: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] edu-groups/{group}/subjects/{subject}/marks/{from_}/{to} Оценки учебной группы по предмету за период Параметры: group: id учебной группы или класса (``int`` / ``str``) subject: id предмета (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByGroupAndSubject """ return await self.get(f"edu-groups/{group}/subjects/{subject}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) async def get_person_marks_in_school(self, person: int | str, school: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/schools/{school}/marks/{from}/{to} Оценки персоны в школе за период Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для 
текущего пользователя) school: id школы (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetBySchoolAndPersonAndPeriod """ return await self.get(f"persons/{await self.check_person(person)}/schools/{school}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) async def get_person_marks_in_eduGroup(self, person: int | str, group: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/edu-groups/{group}/marks/{from}/{to} Оценки персоны в учебной группе за период Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) group: id учебной группы (``int`` / ``str``) (``EduGroup``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByGroupAndPersonAndPeriod """ return await self.get(f"persons/{await self.check_person(person)}/edu-groups/{group}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) async def get_person_marks_on_lesson(self, person: int | str, lesson: str | int) -> list[types.Mark]: """[GET] persons/{person}/lessons/{lesson}/marks Оценки персоны за урок Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) lesson: id урока (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByLessonAndPerson """ return await self.get(f"persons/{await self.check_person(person)}/lessons/{lesson}/marks", model=types.Mark, is_list=True) async def get_person_marks_on_work(self, person: int | str, 
work: str | int) -> list[types.Mark]: """[GET] persons/{person}/lessons/{lesson}/marks Оценки персоны за работу Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) work: id работы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByWorkAndPerson """ return await self.get(f"persons/{await self.check_person(person)}/works/{work}/marks", model=types.Mark, is_list=True) async def get_person_marks_by_subject(self, person: int | str, subject: int | str, from_: datetime | date, to: datetime | date) -> list[types.Mark]: """[GET] persons/{person}/subjects/{subject}/marks/{from_}/{to} Оценки персоны по предмету за период Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало периода (``datetime.datetime`` / ``datetime.date``) to: конец периода (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByPersonAndSubject """ return await self.get(f"persons/{person}/subjects/{subject}/marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}", model=types.Mark, is_list=True) async def get_person_marks_on_lesson_by_date(self, person: int | str, date: datetime | date) -> list[types.Mark]: """[GET] lessons/{date}/persons/{person}/marks Оценки персоны по дате урока Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) date: дата урока (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByPersonAndLessonDate """ return await self.get(f"lessons/{date}/persons/{await self.check_person(person)}/marks", model=types.Mark, is_list=True) async def get_person_marks_by_date(self, person: int | str, date: datetime | date) -> list[types.Mark]: 
"""[GET] persons/{person}/marks/{date} Оценки персоны по дате выставления оценки Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) date: дата выставления оценки (``datetime.datetime`` / ``datetime.date``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Marks/Marks_GetByPersonAndMarkDate """ return await self.get(f"persons/{await self.check_person(person)}/marks/{date}", model=types.Mark, is_list=True) async def get_marks_values(self) -> dict[str, list[str, None]]: """[GET] marks/values Метод возвращает все поддерживаемые системы (типы) оценок и все возможные оценки в каждой из систем.\n Например, для системы "mark5" возвращается массив из следующих оценок: "mark5" : ["1-","1","1+","2-","2","2+","3-","3","3+","4-","4","4+","5-","5","5+"] Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkValues/MarkValues_GetAll """ return await self.get("marks/values", return_json=True) async def get_marks_values_by_type(self, type: str) -> list[str]: """[GET] marks/values/type/{type} Метод возвращает все возможные оценки в запрашиваемой системе (типе) оценок.\n Чтобы узнать, какие типы поддерживаются нужно предварительно делать запрос marks/values без параметров.\n Например, для запроса marks/values/type/mark5 ответом будет list["1-", "1", "1+", "2-", "2", "2+", "3-", "3", "3+", "4-", "4", "4+", "5-", "5", "5+"]. 
Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/MarkValues/MarkValues_GetByType """ return await self.get(f"marks/values/type/{type}", return_json=True) async def get_recent_marks(self, person: str | int, group: int | str, fromDate: datetime | date = None, subject: int | str = None, limit: int = 10) -> types.RecentMarks: """[GET] persons/{person}/group/{group}/recentmarks Последние оценки/отметки посещаемости персоны по предмету, указанному в параметре subject, начиная с даты определенном в параметре fromDate, и с ограничением на выводимое количество указанном в параметре limit Параметры: ``person``: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) ``group``: id класса или учебной группы (``int`` / ``str``) ``*OPTIONAL*``: ``fromDate``: (``datetime.datetime`` / ``datetime.date``) Дата и время, начиная от которого будут выводится оценки/отметки посещаемости. Если не указанно, то результат будет выводится с сегодняшнего дня включительно. Параметр применим для постраничного вывода оценок/отметок посещаемости по конкретному предмету ``subject``: (``int`` / ``str``) id предмета. Если не задан, то результат будет включать в себя оценки/отметки посещаемости по всем предметам, но по каждому предмету будет накладываться ограничение указанном в параметре limit ``limit``: (``int`` = 10) Количество оценок по предмету. Если не задан, то будет применено ограничение по умолчанию, равное 10. Значение должно быть задано в интервале от 1 до 100. """ params = {"limit": str(limit)} if fromDate: params["fromDate"] = self.datetime_to_string(fromDate) if subject: params["subject"] = str(subject) return await self.get(f"persons/{await self.check_person(person)}/group/{group}/recentmarks", params=params, model=types.RecentMarks) async def get_task(self, task: str | int) -> types.Task: """[GET] tasks/{task} Домашнее задание Параметры: task: task-id домашнего задания (не work-id!) 
(``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_Get """ return await self.get(f"tasks/{task}", model=types.Task) async def get_lesson_tasks(self, lesson: str | int) -> list[types.Task]: """[GET] lessons/{lesson}/tasks Список Домашних заданий на урок Параметры: lesson: id урока (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_GetByLesson """ return await self.get(f"lessons/{lesson}/tasks", model=types.Task, is_list=True) async def get_work_tasks(self, work: str | int, persons: str | int | list[int | str]) -> list[types.Task]: """[GET] works/{work}/tasks Список Домашних заданий Параметры: work: id работы (homework) (``str`` / ``int``) persons: id (одно или несколько, обернутых в список) персоны (``int`` / ``str`` / ``list[str | int]``) (``"me"``, для текущего пользователя (можно и в списке указать)) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_GetByWork """ return await self.get(f"works/{work}/tasks{'?persons={}'.format(await self.check_person(persons) if (isinstance(persons, int) or isinstance(persons, str)) else '&persons='.join([await self.check_person(i) for i in persons]))}", model=types.Task, is_list=True) async def get_undone_person_tasks(self, personId: str | int = "me") -> list[types.Task]: """[GET] persons/{personId}/undone Список невыполненных Домашних заданий обучающегося с истекшим сроком выполнения Параметры: personId: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_ListNotCompletedByPersonId """ return await self.get(f"persons/{await self.check_person(personId)}/undone", model=types.Task, is_list=True) async def get_person_tasks(self, person: str | int, subject: int | str, from_: datetime | date, to: 
datetime | date, pageNumber: int = None, pageSize: int = None) -> list[types.Task]: """[GET] persons{person}/tasks Список Домашних заданий ученика по предмету Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) subject: id предмета (``int`` / ``str``) from_: начало интервала дат (``datetime.datetime``) to: конец интервала дат (``datetime.datetime``) pageNumber: номер страницы (``int``) (``*optional*``) pageSize: размер страницы (``int``) (``*optional*``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Tasks/Tasks_GetByPersonAndSubject """ params = { "subject": str(subject), "from": self.datetime_to_string(from_), "to": self.datetime_to_string(to), } if pageNumber: params["pageNumber"] = str(pageNumber) if pageSize: params["pageSize"] = str(pageSize) return await self.get(f"persons/{await self.check_person(person)}/tasks", model=types.Task, is_list=True, params=params) async def get_eduGroup_subjects(self, eduGroup: int | str) -> list[types.Subject]: """[GET] edu-groups/{eduGroup}/subjects Список предметов, преподаваемых в классе в текущем отчетном периоде Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Subjects/Subjects_GetByEduGroup """ return await self.get(f"edu-groups/{eduGroup}/subjects", model=types.Subject, is_list=True) async def get_school_subjects(self, school: int | str) -> list[types.Subject]: """[GET] schools/{school}/subjects Список предметов, преподаваемых в образовательной организации в текущем учебном году Параметры: school: id школы (``int`` / ``str``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Subjects/Subjects_GetSchoolSubjects """ return await self.get(f"schools/{school}/subjects", model=types.Subject, is_list=True) async def get_school_parameters(self, school: int | str) -> types.SchoolParameters: """[GET] 
schools/{school}/parameters Параметры общеобразовательной организации Параметры: school: id школы (``int`` / ``str``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/SchoolsParameters/SchoolsParameters_Get """ return await self.get(f"schools/{school}/parameters", model=types.SchoolParameters) async def get_school(self, school: int | str) -> types.School: """[GET] schools/{school} Профиль школы Параметры: school: id школы (``int`` / ``str``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schools/Schools_Get """ return await self.get(f"schools/{school}", model=types.School) async def get_school_membership(self, school: int | str, schoolMembershipType: str = "Staff") -> list[types.Person]: """[GET] schools/{school}/membership Список профилей пользователей школы Параметры: school: id школы (``int`` / ``str``) schoolMembershipType: тип запрашиваемых пользователей (``"Staff" / "Admins"``) (``str``). По умолчанию стоит ``"Staff"`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schools/Schools_GetSchoolMembership """ return await self.get(f"schools/{school}/membership?schoolMembershipType={schoolMembershipType}", model=types.Person, is_list=True) async def get_person_schools(self, excludeOrganizations: bool = "") -> list[types.School]: """[GET] schools/person-schools Список образовательных организаций текущего пользователя Параметры: excludeOrganizations: - (``bool``) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schools/Schools_GetPersonSchools """ return await self.get( "schools/person-schools".format( f"?excludeOrganizations={'true' if excludeOrganizations is True else 'false' if excludeOrganizations is False else ''}" if ( not isinstance(excludeOrganizations, str) and excludeOrganizations in [True, False]) else "" ), model=types.School, is_list=True) async def get_person_schedules(self, person: int | str, group: int | str, startDate: datetime | date, endDate: datetime | date) -> types.Schedule: """[GET] 
persons/{person}/groups/{group}/schedules Расписание ученика Параметры: person: id персоны (``int`` / ``str``) (``"me"``, для текущего пользователя) group: id учебной группы или класса (``int`` / ``str``) (``EduGroupID``) startDate: дата начала периода (``datetime.datetime``) endDate: дата завершения периода (``datetime.datetime``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Schedules/Schedules_GetByPersonAndPeriod """ return await self.get(f"persons/{await self.check_person(person)}/groups/{group}/schedules?startDate={self.datetime_to_string(startDate)}&endDate={self.datetime_to_string(endDate)}", model=types.Schedule) async def get_eduGroup_reporting_periods(self, eduGroup: int | str) -> list[types.ReportingPeriod]: """[GET] edu-groups/{eduGroup}/reporting-periods Список отчётных периодов для класса или учебной группы Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/ReportingPeriods/ReportingPeriods_GetByEduGroup """ return await self.get(f"edu-groups/{eduGroup}/reporting-periods", model=types.ReportingPeriod, is_list=True) async def get_eduGroup_reporting_periods_all(self, eduGroup: int | str) -> types.ReportingPeriodEduGroup: """[GET] edu-groups/{eduGroup}/reporting-periods-group Группа отчётных периодов для класса или учебной группы Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/ReportingPeriods/ReportingPeriods_GetGroupReportingPeriodsGroup """ return await self.get(f"edu-groups/{eduGroup}/reporting-periods-group", model=types.ReportingPeriodEduGroup) async def get_person(self, person: int | str = "me") -> types.Person: """[GET] persons/{person} Профиль персоны Параметры: person: id персоны (``"me"``, или пусто для текущего пользователя) Права доступа: 
``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Persons/Persons_Get """ return await self.get(f"persons/{await self.check_person(person)}", model=types.Person) async def get_eduGroup_students(self, eduGroup: int | str) -> list[types.Person]: """[GET] edu-groups/{eduGroup}/students Список учеников в классе или учебной группе Параметры: eduGroup: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Persons/Persons_GetByEduGroup_0 """ return await self.get(f"edu-groups/{eduGroup}/students", model=types.Person, is_list=True) async def search_person( self, lastName: str = None, firstName: str = None, middleName: str = None, snils: str = None, birthday: date = None, ) -> None | list[types.Person]: """[GET] person/search Поиск персоны Параметры: lastName: Фамилия (``str``, ``*optional*``) firstName: Имя (``str``, ``*optional*``) middleName: Отчество (``str``, ``*optional*``) snils: СНИЛС (``str``, ``*optional*``) birthday: ДАТА РОЖДЕНИЯ (``datetime.date``, ``*optional*``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Persons/Persons_Search """ params = {} if lastName: params["lastName"] = lastName if firstName: params["firstName"] = firstName if middleName: params["middleName"] = middleName if snils: params["snils"] = snils if birthday: params["birthday"] = self.date_to_string(birthday) return await self.get("person/search", params=params, model=types.Person, is_list=True) async def get_eduGroup_teachers(self, group: int | str) -> list[types.EduGroupTeacher]: """[GET] edu-groups/{group}/teachers Список учителей, которые ведут уроки в данной группе, учитываются уроки от недели назад и на 30 дней вперед Параметры: group: id класса или учебной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: 
https://api.school.mosreg.ru/partners/swagger/ui/index#!/Teacher/Teacher_GetEduGroupTeachers """ return await self.get(f'edu-groups/{group}/teachers', model=types.EduGroupTeacher, is_list=True) async def get_school_teachers(self, school: int | str) -> list[types.SchoolTeacher]: """[GET] teacher/{teacher}/students Список преподавателей в выбранной образовательной организации Параметры: teacher: person-id учителя (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Teacher/Teacher_GetSchoolTeachers """ return await self.get(f'schools/{school}/teachers', model=types.SchoolTeacher, is_list=True) async def get_teacher_students(self, teacher: int | str) -> list[types.TeacherStudent]: """[GET] teacher/{teacher}/students Список учеников для учителя который ведет уроки у этих учеников(они должны быть в расписании) от недели назад и на 30 дней вперед Параметры: teacher: person-id учителя (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Teacher/Teacher_GetStudentsByTeacher """ return await self.get(f'teacher/{teacher}/students', model=types.TeacherStudent, is_list=True) async def get_eduGroup_timetable(self, eduGroup: int | str) -> types.TimeTable: """[GET] edu-groups/{eduGroup}/timetables Получение расписания учебной группы Параметры: eduGroup: id класса или учбеной группы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Timetables/Timetables_GetByEduGroup """ return await self.get(f'edu-groups/{eduGroup}/timetables', model=types.TimeTable) async def get_school_timetable(self, school: int | str) -> types.TimeTable: """[GET] schools/{school}/timetables Получение расписания школы Параметры: school: id школы (``int`` / ``str``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Timetables/Timetables_GetBySchool """ return await 
self.get(f'schools/{school}/timetables', model=types.TimeTable) async def get_user_feed(self, date: datetime | date, childPersonId: int | str = None, limit: int | str = None) -> types.UserFeed: """[GET] users/me/feed Лента пользователя Параметры: date: Дата начала временного интервала (``datetime.datetime``) childPersonId: id персоны ребёнка (``int`` | ``str``) (``optional``) limit: Ограничение временного интервала в днях (``int``) (``optional``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserFeeds/UserFeeds_GetUserFeed """ params = {"date": self.datetime_to_string(date)} if childPersonId: params["childPersonId"] = childPersonId if limit: params["limit"] = limit return await self.get('users/me/feed', model=types.UserFeed, params=params) async def get_my_children_relatives(self) -> list[types.UserRelatives | None] | None: """[GET] users/me/childrenrelatives Список id всех родственных связей детей произвольного пользователя Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserRelatives/UserRelatives_GetOwnChildrenRelatives """ return await self.get(f"users/me/childrenrelatives", model=types.UserRelatives, is_list=True) async def get_my_childrens(self) -> list[int | None] | None: """[GET] users/me/children Список id пользователей детей текущего пользователя Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserRelatives/UserRelatives_GetOwnChildren """ return await self.get(f"users/me/children", return_json=True) async def get_user_relatives(self, user: str | int = "me") -> types.UserRelatives: """[GET] users/{user}/relatives | users/me/relatives Получение всех родственных связей произвольного/текущего пользователя. 
Параметры: user: id пользователя (``int`` / ``str``) (``"me"``, для текущего пользователя) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/UserRelatives/UserRelatives_GetRelatives """ return await self.get(f"users/{user}/relatives", model=types.UserRelatives) async def get_user(self, user: str | int = "me") -> types.User: """[GET] users/{user} | users/me Профиль текущего пользователя (или по ID) Параметры: user: id пользователя (``int`` / ``str``) (``"me"``, для текущего пользователя) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Users/Users_Get """ return await self.get(f"users/{user}", model=types.User) async def get_user_roles(self, user: str | int = "me") -> list[int | None] | None: """[GET] users/{user}/roles | users/me/roles Профиль текущего пользователя (или по ID) Параметры: user: id пользователя (``int`` / ``str``) (``"me"``, для текущего пользователя) Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Users/Users_Get """ return await self.get(f"users/{user}/roles", return_json=True) async def get_weighted_average_marks(self, group: int | str, from_: datetime | date, to: datetime | date) -> types.WeightedAverageMarks: """[GET] edu-groups/{group}/wa-marks/{from_}/{to} Получить взвешенные оценки за период. 
Параметры: group: id класса или учебной группы (``int`` / ``str``) (``EduGroup``) from_: начало периода (``datetime.datetime``) to: конец периода (``datetime.datetime``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/WeightedAverageMarks/WeightedAverageMarks_GetGroupAverageMarks """ return await self.get(f'edu-groups/{group}/wa-marks/{self.datetime_to_string(from_)}/{self.datetime_to_string(to)}', model=types.WeightedAverageMarks) async def get_lesson_works(self, lesson: str | int) -> types.Work: """[GET] lessons/{lesson}/works Список работ на уроке Параметры: lesson: id урока (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Works/Works_GetByLesson_0 """ return await self.get(f'lessons/{lesson}/works', model=types.Work, is_list=True) async def get_work(self, work: str | int) -> types.Work: """[GET] works/{work} Работа на уроке по ID Параметры: work: id работы (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Works/Works_Get """ return await self.get(f'works/{work}', model=types.Work) async def edit_homework_status(self, work: int | str, person: str | int = "me", change: dict[str, str] = {"action": "StartWorking"}): """[POST] works/{work}/persons/{person}/status Изменить статус выполнения домашней работы учащимся. 
Параметры: work: id урока (``int`` / ``str``) person: id персоны (``int`` / ``str``) (``"me"``, для себя) change: статус (``dict[str, str]``) : Пример -> ``{"action": "StartWorking"}`` Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/Works/Works_ChangeStatus """ return await self.post(f'works/{work}/persons/{await self.check_person(person)}/status', return_json=True, data=change) async def get_school_work_types(self, school: str | int) -> list[types.WorkType]: """[GET] work-types/{school} Получение списка всех типов работ школы Параметры: school: id школы (``str`` / ``int``) Права доступа: ``EducationalInfo`` Docs: https://api.school.mosreg.ru/partners/swagger/ui/index#!/WorkTypes/WorkTypes_Get """ return await self.get(f'work-types/{school}', model=types.WorkType, is_list=True)
/school_mosreg_api-0.9.4.tar.gz/school_mosreg_api-0.9.4/school_mosreg_api/asyncapi/methods.py
0.554229
0.250987
methods.py
pypi
# cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=2018133209 from school_sdk.client.api import BaseCrawler class Score(BaseCrawler): year = None term = None def __init__(self, user_client) -> None: super().__init__(user_client) self.endpoints: dict = self.school.config['url_endpoints'] self.raw_score = None self.score_dict:dict = {} self.score_list:list = [] def get_score(self, **kwargs): return self.get_score_dict(**kwargs) def get_score_list(self, **kwargs): """获取成绩清单-列表 Returns: list: 成绩列表 """ if not self.score_list: self.parse(**kwargs) return self.score_list def get_score_dict(self, **kwargs): """获取成绩清单-字典 Returns: dict: 成绩字典清单 """ if not self.score_dict: self.parse(**kwargs) if kwargs.get('year') != self.year or kwargs.get('term') != self.term: self.raw_score = None self.parse(**kwargs) return self.score_dict def parse(self, **kwargs): """解析数据 """ if self.raw_score is None: self.load_score(**kwargs) self._parse(self.raw_score) def load_score(self, **kwargs) -> None: """加载课表 """ self.raw_score = self._get_score(**kwargs) def _get_score(self, year: int, term: int = 1, **kwargs): """获取教务系统成绩 Args: year (int): 学年 term (int, optional): 学期. Defaults to 1. 
Returns: json: json数据 """ self.year = year self.term = term url = self.endpoints['SCORE']['API'] params = { 'doType': 'query', 'gnmkdm': 'N305005', 'su': self.account } data = { 'xnm': year, 'xqm': self.TERM.get(term, 3), '_search': False, 'nd': self.t, 'queryModel.showCount': 500, 'queryModel.currentPage': 1, 'queryModel.sortName': None, 'queryModel.sortOrder': 'asc', 'time': 4, } res = self.post(url=url, params=params, data=data, **kwargs) return res.json() def _parse(self, raw: dict): # kcmc -> 课程名称 # kch -> 课程号 # kcxzmc -> 课程性质名称 # kcbj -> 课程标记 # jsxm -> 教师姓名 # tjsj -> 提交时间 # khfsmc -> 考核方式 # ksxz -> 考试性质 # cj -> 成绩 # bfzcj -> 百分制成绩 # xf -> 学分 # kkbmmc -> 开课部门名称 # njdm_id -> 年级代码 # jd -> 绩点 # bzxx -> 备注信息 """解析教务系统成绩 Args: raw (dict): 教务系统的原始数据 """ self.score_dict:dict = {} self.score_list:list = [] items = raw.get('items') for item in items: format_item = { "course_name": item.get('kcmc'), # kcmc -> 课程名称 "course_code": item.get('kch'), # kch -> 课程号 'course_nature': item.get('kcxzmc'), # kcxzmc -> 课程性质名称 'course_target': item.get('kcbj'), # kcbj -> 课程标记 'teacher': item.get('jsxm'), # jsxm -> 教师姓名 'submitted_at': item.get('tjsj'), # tjsj -> 提交时间 'exam_method': item.get('khfsmc'), # khfsmc -> 考核方式 'exam_nature': item.get('ksxz'), # ksxz -> 考试性质 'exam_result': item.get('cj'), # cj -> 成绩 'exam_score': item.get('bfzcj'), # bfzcj -> 百分制成绩 'credit': item.get('xf'), # xf -> 学分 'course_group': item.get('kkbmmc'), # kkbmmc -> 开课部门名称 'grade': item.get('njdm_id'), # njdm_id -> 年级代码 'grade_point': item.get('jd'), # jd -> 绩点 'reason': item.get('bzxx') # bzxx -> 备注信息 } self.score_list.append(format_item) self.score_dict.setdefault(item.get('kcmc'), format_item)
/school-sdk-1.5.0.tar.gz/school-sdk-1.5.0/school_sdk/client/api/score.py
0.469277
0.192501
score.py
pypi
import requests from majormode.perseus.constant.place import AddressComponentType from majormode.perseus.model.geolocation import GeoPoint from majormode.perseus.model.place import Place GOOGLE_GEOCODING_API_URL = 'https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}' GOOGLE_PERSEUS_ADDRESS_COMPONENTS_MAPPING = { 'street_number': AddressComponentType.house_number, 'route': AddressComponentType.street_name, 'administrative_area_level_1': AddressComponentType.city, 'administrative_area_level_2': AddressComponentType.district, 'administrative_area_level_3': AddressComponentType.ward, 'country': AddressComponentType.country } class GoogleGeocoder: def __init__(self, api_key): self.__api_key = api_key def __parse_geometry(self, data): if data: location_data = data.get('location') location = GeoPoint(location_data.get('lat'), location_data.get('lng')) return location def __parse_place(self, data): address = self.__parse_address_components(data.get('address_components')) formatted_address = data.get('formatted_address') address[AddressComponentType.geocoded_address] = formatted_address location = self.__parse_geometry(data.get('geometry')) place = Place(location, address=address) return place def __parse_address_components(self, data): address_components = {} if data: for address_component in data: component_value = address_component.get( 'long_name', address_component.get('short_name')) for google_component_type in address_component['types']: component_type = GOOGLE_PERSEUS_ADDRESS_COMPONENTS_MAPPING.get(google_component_type) address_components[component_type] = component_value break return address_components def geocode_address(self, formatted_address): response = requests.get(GOOGLE_GEOCODING_API_URL.format( formatted_address, self.__api_key)) if response.status_code != requests.codes.ok: response.raise_for_status() data = response.json() if data['status'] != 'OK': raise Exception(data['error_message']) results = data['results'] return None if 
len(results) == 0 else self.__parse_place(results[0])
/school_transport_application_form_tool-1.2.15-py3-none-any.whl/intek/application/geocoding.py
0.538012
0.331864
geocoding.py
pypi