Dataset columns: text (string, lengths 1 to 1.02k), class_index (int64, range 0 to 305), source (string, 77 distinct values)
class FillMaskOutputElement(BaseInferenceType):
    """Outputs of inference for the Fill Mask task"""

    score: float
    """The corresponding probability"""
    sequence: str
    """The corresponding input with the mask token prediction."""
    token: int
    """The predicted token id (to replace the masked one)."""
    token_str: Any
    fill_mask_output_token_str: Optional[str] = None
    """The predicted token (to replace the masked one)."""
202
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/fill_mask.py
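A minimal usage sketch: raw fill-mask bytes from an inference endpoint can be parsed with the `parse_obj_as_list` helper defined on `BaseInferenceType` (reproduced near the end of this section). This assumes the imports and `@dataclass` decorators present in the full source files, which these dataset rows omit; the JSON values are illustrative.

from huggingface_hub.inference._generated.types import FillMaskOutputElement

# Raw JSON bytes, as a fill-mask endpoint would return them (values are made up).
raw = b'[{"score": 0.92, "sequence": "Paris is the capital of France.", "token": 2123, "token_str": "Paris"}]'

elements = FillMaskOutputElement.parse_obj_as_list(raw)
print(elements[0].token_str, elements[0].score)  # Paris 0.92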
class ImageToTextGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "ImageToTextEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to a float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to a
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to a float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a
    target token next is to the expected conditional probability of predicting a random
    token next, given the partial text already generated. If set to a float < 1, the
    smallest set of the most locally typical tokens with probabilities that add up to
    typical_p or higher are kept for generation. See [this
    paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
203
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_text.py
class ImageToTextParameters(BaseInferenceType):
    """Additional inference parameters for Image To Text"""

    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate."""
    # Will be deprecated in the future when the renaming to `generation_parameters` is
    # implemented in transformers
    generate_kwargs: Optional[ImageToTextGenerationParameters] = None
    """Parametrization of the text generation process"""
204
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_text.py
class ImageToTextInput(BaseInferenceType):
    """Inputs for Image To Text inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[ImageToTextParameters] = None
    """Additional inference parameters for Image To Text"""
205
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_text.py
class ImageToTextOutput(BaseInferenceType):
    """Outputs of inference for the Image To Text task"""

    generated_text: Any
    image_to_text_output_generated_text: Optional[str] = None
    """The generated text."""
206
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_text.py
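Generation options for image-to-text are nested under `generate_kwargs` rather than passed flat, as the deprecation note above hints. A sketch of building such a payload (field values are illustrative; `@dataclass` semantics are assumed from the full source files):

from huggingface_hub.inference._generated.types import (
    ImageToTextGenerationParameters,
    ImageToTextInput,
    ImageToTextParameters,
)

payload = ImageToTextInput(
    inputs="<base64-encoded image>",  # placeholder for real image data
    parameters=ImageToTextParameters(
        max_new_tokens=30,
        # Generation options live in a nested object until the rename to
        # `generation_parameters` lands in transformers.
        generate_kwargs=ImageToTextGenerationParameters(do_sample=False, num_beams=4),
    ),
)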
class AudioClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Audio Classification"""

    function_to_apply: Optional["AudioClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""
207
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/audio_classification.py
class AudioClassificationInput(BaseInferenceType):
    """Inputs for Audio Classification inference"""

    inputs: str
    """The input audio data as a base64-encoded string. If no `parameters` are provided, you
    can also provide the audio data as a raw bytes payload.
    """
    parameters: Optional[AudioClassificationParameters] = None
    """Additional inference parameters for Audio Classification"""
208
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/audio_classification.py
class AudioClassificationOutputElement(BaseInferenceType):
    """Outputs for Audio Classification inference"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
209
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/audio_classification.py
class VideoClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Video Classification"""

    frame_sampling_rate: Optional[int] = None
    """The sampling rate used to select frames from the video."""
    function_to_apply: Optional["VideoClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    num_frames: Optional[int] = None
    """The number of sampled frames to consider for classification."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""
210
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/video_classification.py
class VideoClassificationInput(BaseInferenceType):
    """Inputs for Video Classification inference"""

    inputs: Any
    """The input video data"""
    parameters: Optional[VideoClassificationParameters] = None
    """Additional inference parameters for Video Classification"""
211
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/video_classification.py
class VideoClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Video Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
212
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/video_classification.py
class ImageToImageTargetSize(BaseInferenceType):
    """The size in pixels of the output image."""

    height: int
    width: int
213
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_image.py
class ImageToImageParameters(BaseInferenceType):
    """Additional inference parameters for Image To Image"""

    guidance_scale: Optional[float] = None
    """For diffusion models. A higher guidance scale value encourages the model to generate
    images closely linked to the text prompt at the expense of lower image quality.
    """
    negative_prompt: Optional[List[str]] = None
    """One or several prompts to guide what NOT to include in image generation."""
    num_inference_steps: Optional[int] = None
    """For diffusion models. The number of denoising steps. More denoising steps usually lead
    to a higher quality image at the expense of slower inference.
    """
    target_size: Optional[ImageToImageTargetSize] = None
    """The size in pixels of the output image."""
214
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_image.py
class ImageToImageInput(BaseInferenceType):
    """Inputs for Image To Image inference"""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you
    can also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ImageToImageParameters] = None
    """Additional inference parameters for Image To Image"""
215
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_image.py
class ImageToImageOutput(BaseInferenceType):
    """Outputs of inference for the Image To Image task"""

    image: Any
    """The output image returned as raw bytes in the payload."""
216
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_to_image.py
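Since `inputs` is a base64-encoded string (unless sent as raw bytes), an image-to-image request payload can be sketched as follows; the file name and parameter values are illustrative:

import base64

from huggingface_hub.inference._generated.types import (
    ImageToImageInput,
    ImageToImageParameters,
    ImageToImageTargetSize,
)

with open("cat.png", "rb") as f:  # any local image file
    encoded = base64.b64encode(f.read()).decode()

payload = ImageToImageInput(
    inputs=encoded,
    parameters=ImageToImageParameters(
        guidance_scale=7.5,
        num_inference_steps=25,
        target_size=ImageToImageTargetSize(height=512, width=512),
    ),
)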
class SentenceSimilarityInputData(BaseInferenceType):
    sentences: List[str]
    """A list of strings which will be compared against the source_sentence."""
    source_sentence: str
    """The string that you wish to compare the other strings with. This can be a phrase,
    sentence, or longer passage, depending on the model being used.
    """
217
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/sentence_similarity.py
class SentenceSimilarityInput(BaseInferenceType):
    """Inputs for Sentence similarity inference"""

    inputs: SentenceSimilarityInputData
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters for Sentence Similarity"""
218
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/sentence_similarity.py
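A sketch of a sentence-similarity payload; the model is expected to return one score per entry in `sentences` (sentence values are illustrative):

from huggingface_hub.inference._generated.types import (
    SentenceSimilarityInput,
    SentenceSimilarityInputData,
)

payload = SentenceSimilarityInput(
    inputs=SentenceSimilarityInputData(
        source_sentence="A cat sits on the mat.",
        sentences=["A feline rests on a rug.", "Stocks fell sharply today."],
    )
)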
class FeatureExtractionInput(BaseInferenceType):
    """Feature Extraction Input.

    Auto-generated from TEI specs. For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
    """

    inputs: str
    """The text to embed."""
    normalize: Optional[bool] = None
    prompt_name: Optional[str] = None
    """The name of the prompt that should be used for encoding. If not set, no prompt will be
    applied. Must be a key in the `sentence-transformers` configuration `prompts` dictionary.
    For example, if ``prompt_name`` is "query" and ``prompts`` is {"query": "query: ", ...},
    then the sentence "What is the capital of France?" will be encoded as
    "query: What is the capital of France?" because the prompt text will be prepended before
    any text to encode.
    """
    truncate: Optional[bool] = None
    truncation_direction: Optional["FeatureExtractionInputTruncationDirection"] = None
219
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/feature_extraction.py
class DepthEstimationInput(BaseInferenceType):
    """Inputs for Depth Estimation inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters for Depth Estimation"""
220
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/depth_estimation.py
class DepthEstimationOutput(BaseInferenceType):
    """Outputs of inference for the Depth Estimation task"""

    depth: Any
    """The predicted depth as an image"""
    predicted_depth: Any
    """The predicted depth as a tensor"""
221
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/depth_estimation.py
class TextToSpeechGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to a float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to a
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to a float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a
    target token next is to the expected conditional probability of predicting a random
    token next, given the partial text already generated. If set to a float < 1, the
    smallest set of the most locally typical tokens with probabilities that add up to
    typical_p or higher are kept for generation. See [this
    paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
222
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_speech.py
class TextToSpeechParameters(BaseInferenceType):
    """Additional inference parameters for Text To Speech"""

    # Will be deprecated in the future when the renaming to `generation_parameters` is
    # implemented in transformers
    generate_kwargs: Optional[TextToSpeechGenerationParameters] = None
    """Parametrization of the text generation process"""
223
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_speech.py
class TextToSpeechInput(BaseInferenceType):
    """Inputs for Text To Speech inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[TextToSpeechParameters] = None
    """Additional inference parameters for Text To Speech"""
224
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_speech.py
class TextToSpeechOutput(BaseInferenceType):
    """Outputs for Text to Speech inference

    Outputs of inference for the Text To Audio task
    """

    audio: Any
    """The generated audio waveform."""
    sampling_rate: Any
    text_to_speech_output_sampling_rate: Optional[float] = None
    """The sampling rate of the generated audio waveform."""
225
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_speech.py
class ZeroShotClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Zero Shot Classification"""

    candidate_labels: List[str]
    """The set of possible class labels to classify the text into."""
    hypothesis_template: Optional[str] = None
    """The sentence used in conjunction with `candidate_labels` to attempt the text
    classification by replacing the placeholder with the candidate labels.
    """
    multi_label: Optional[bool] = None
    """Whether multiple candidate labels can be true. If false, the scores are normalized such
    that the sum of the label likelihoods for each sequence is 1. If true, the labels are
    considered independent and probabilities are normalized for each candidate.
    """
226
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_classification.py
class ZeroShotClassificationInput(BaseInferenceType):
    """Inputs for Zero Shot Classification inference"""

    inputs: str
    """The text to classify"""
    parameters: ZeroShotClassificationParameters
    """Additional inference parameters for Zero Shot Classification"""
227
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_classification.py
class ZeroShotClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Zero Shot Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
228
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_classification.py
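Unlike most parameter types in this section, `candidate_labels` is required and `parameters` itself is non-optional on the input. A hedged sketch (text and labels illustrative):

from huggingface_hub.inference._generated.types import (
    ZeroShotClassificationInput,
    ZeroShotClassificationParameters,
)

payload = ZeroShotClassificationInput(
    inputs="I loved the new Batman movie!",
    parameters=ZeroShotClassificationParameters(
        candidate_labels=["positive", "negative"],
        # The {} placeholder is replaced by each candidate label in turn.
        hypothesis_template="The sentiment of this review is {}.",
        multi_label=False,
    ),
)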
class TextClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Text Classification"""

    function_to_apply: Optional["TextClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""
229
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_classification.py
class TextClassificationInput(BaseInferenceType):
    """Inputs for Text Classification inference"""

    inputs: str
    """The text to classify"""
    parameters: Optional[TextClassificationParameters] = None
    """Additional inference parameters for Text Classification"""
230
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_classification.py
class TextClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Text Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
231
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_classification.py
class DocumentQuestionAnsweringInputData(BaseInferenceType):
    """One (document, question) pair to answer"""

    image: Any
    """The image on which the question is asked"""
    question: str
    """A question to ask of the document"""
232
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/document_question_answering.py
class DocumentQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Document Question Answering"""

    doc_stride: Optional[int] = None
    """If the words in the document are too long to fit with the question for the model, it
    will be split in several chunks with some overlap. This argument controls the size of
    that overlap.
    """
    handle_impossible_answer: Optional[bool] = None
    """Whether to accept impossible as an answer"""
    lang: Optional[str] = None
    """Language to use while running OCR. Defaults to English."""
    max_answer_len: Optional[int] = None
    """The maximum length of predicted answers (e.g., only answers with a shorter length are
    considered).
    """
    max_question_len: Optional[int] = None
    """The maximum length of the question after tokenization. It will be truncated if needed."""
    max_seq_len: Optional[int] = None
    """The maximum length of the total sentence (context + question) in tokens of each chunk
    passed to the model. The context will be split in several chunks (using doc_stride as
    overlap) if needed.
    """
    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Can return
    fewer than top_k answers if there are not enough options available within the context.
    """
    word_boxes: Optional[List[Union[List[float], str]]] = None
    """A list of words and bounding boxes (normalized 0->1000). If provided, the inference
    will skip the OCR step and use the provided bounding boxes instead.
    """
233
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/document_question_answering.py
class DocumentQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Document Question Answering inference"""

    inputs: DocumentQuestionAnsweringInputData
    """One (document, question) pair to answer"""
    parameters: Optional[DocumentQuestionAnsweringParameters] = None
    """Additional inference parameters for Document Question Answering"""
234
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/document_question_answering.py
class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Document Question Answering task"""

    answer: str
    """The answer to the question."""
    end: int
    """The end word index of the answer (in the OCR'd version of the input or provided word
    boxes).
    """
    score: float
    """The probability associated with the answer."""
    start: int
    """The start word index of the answer (in the OCR'd version of the input or provided word
    boxes).
    """
235
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/document_question_answering.py
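A sketch of a document-QA request that skips OCR by supplying `word_boxes`. Per the type hint above, words (str) and boxes (List[float], normalized to 0->1000) can be interleaved in one flat list; treat the exact interleaving and all values as illustrative assumptions:

from huggingface_hub.inference._generated.types import (
    DocumentQuestionAnsweringInput,
    DocumentQuestionAnsweringInputData,
    DocumentQuestionAnsweringParameters,
)

payload = DocumentQuestionAnsweringInput(
    inputs=DocumentQuestionAnsweringInputData(
        image="<base64-encoded document image>",  # placeholder
        question="What is the invoice total?",
    ),
    parameters=DocumentQuestionAnsweringParameters(
        # Words and their boxes, so the OCR step is skipped entirely.
        word_boxes=["Total:", [100.0, 200.0, 180.0, 220.0], "$42.00", [190.0, 200.0, 260.0, 220.0]],
        max_answer_len=15,
    ),
)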
class TranslationParameters(BaseInferenceType):
    """Additional inference parameters for Translation"""

    clean_up_tokenization_spaces: Optional[bool] = None
    """Whether to clean up the potential extra spaces in the text output."""
    generate_parameters: Optional[Dict[str, Any]] = None
    """Additional parametrization of the text generation algorithm."""
    src_lang: Optional[str] = None
    """The source language of the text. Required for models that can translate from multiple
    languages.
    """
    tgt_lang: Optional[str] = None
    """Target language to translate to. Required for models that can translate to multiple
    languages.
    """
    truncation: Optional["TranslationTruncationStrategy"] = None
    """The truncation strategy to use."""
236
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/translation.py
class TranslationInput(BaseInferenceType):
    """Inputs for Translation inference"""

    inputs: str
    """The text to translate."""
    parameters: Optional[TranslationParameters] = None
    """Additional inference parameters for Translation"""
237
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/translation.py
class TranslationOutput(BaseInferenceType):
    """Outputs of inference for the Translation task"""

    translation_text: str
    """The translated text."""
238
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/translation.py
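For multilingual checkpoints, `src_lang`/`tgt_lang` select the language pair. A sketch (the NLLB-style language codes are an assumption; the expected codes are model-specific):

from huggingface_hub.inference._generated.types import TranslationInput, TranslationParameters

payload = TranslationInput(
    inputs="Le chat dort sur le canapé.",
    parameters=TranslationParameters(src_lang="fra_Latn", tgt_lang="eng_Latn"),
)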
class TextGenerationInputGrammarType(BaseInferenceType):
    type: "TypeEnum"
    value: Any
    """A string that represents a [JSON Schema](https://json-schema.org/). JSON Schema is a
    declarative language that allows annotating JSON documents with types and descriptions.
    """
239
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationInputGenerateParameters(BaseInferenceType):
    adapter_id: Optional[str] = None
    """LoRA adapter id"""
    best_of: Optional[int] = None
    """Generate best_of sequences and return the one with the highest token logprobs."""
    decoder_input_details: Optional[bool] = None
    """Whether to return decoder input token logprobs and ids."""
    details: Optional[bool] = None
    """Whether to return generation details."""
    do_sample: Optional[bool] = None
    """Activate logits sampling."""
    frequency_penalty: Optional[float] = None
    """The parameter for frequency penalty. 1.0 means no penalty. Penalize new tokens based on
    their existing frequency in the text so far, decreasing the model's likelihood to repeat
    the same line verbatim.
    """
    grammar: Optional[TextGenerationInputGrammarType] = None
    max_new_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""
    repetition_penalty: Optional[float] = None
    """The parameter for repetition penalty. 1.0 means no penalty. See [this
    paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    """
    return_full_text: Optional[bool] = None
    """Whether to prepend the prompt to the generated text"""
    seed: Optional[int] = None
    """Random sampling seed."""
    stop: Optional[List[str]] = None
    """Stop generating tokens if a member of `stop` is generated."""
    temperature: Optional[float] = None
    """The value used to modulate the logits distribution."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_n_tokens: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-n-filtering."""
    top_p: Optional[float] = None
    """Top-p value for nucleus sampling."""
    truncate: Optional[int] = None
    """Truncate input tokens to the given size."""
    typical_p: Optional[float] = None
    """Typical Decoding mass. See [Typical Decoding for Natural Language
    Generation](https://arxiv.org/abs/2202.00666) for more information.
    """
    watermark: Optional[bool] = None
    """Watermarking with [A Watermark for Large Language
    Models](https://arxiv.org/abs/2301.10226).
    """
240
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationInput(BaseInferenceType):
    """Text Generation Input.

    Auto-generated from TGI specs. For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    inputs: str
    parameters: Optional[TextGenerationInputGenerateParameters] = None
    stream: Optional[bool] = None
241
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationOutputPrefillToken(BaseInferenceType):
    id: int
    logprob: float
    text: str
242
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationOutputToken(BaseInferenceType):
    id: int
    logprob: float
    special: bool
    text: str
243
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationOutputBestOfSequence(BaseInferenceType):
    finish_reason: "TextGenerationOutputFinishReason"
    generated_text: str
    generated_tokens: int
    prefill: List[TextGenerationOutputPrefillToken]
    tokens: List[TextGenerationOutputToken]
    seed: Optional[int] = None
    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
244
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationOutputDetails(BaseInferenceType):
    finish_reason: "TextGenerationOutputFinishReason"
    generated_tokens: int
    prefill: List[TextGenerationOutputPrefillToken]
    tokens: List[TextGenerationOutputToken]
    best_of_sequences: Optional[List[TextGenerationOutputBestOfSequence]] = None
    seed: Optional[int] = None
    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
245
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationOutput(BaseInferenceType):
    """Text Generation Output.

    Auto-generated from TGI specs. For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    generated_text: str
    details: Optional[TextGenerationOutputDetails] = None
246
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationStreamOutputStreamDetails(BaseInferenceType):
    finish_reason: "TextGenerationOutputFinishReason"
    generated_tokens: int
    input_length: int
    seed: Optional[int] = None
247
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationStreamOutputToken(BaseInferenceType):
    id: int
    logprob: float
    special: bool
    text: str
248
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
class TextGenerationStreamOutput(BaseInferenceType):
    """Text Generation Stream Output.

    Auto-generated from TGI specs. For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    index: int
    token: TextGenerationStreamOutputToken
    details: Optional[TextGenerationStreamOutputStreamDetails] = None
    generated_text: Optional[str] = None
    top_tokens: Optional[List[TextGenerationStreamOutputToken]] = None
249
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_generation.py
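Putting the TGI types together: build a request, serialize it (these types subclass dict, so json.dumps works on them directly), and parse a response. The prompt, parameter values, and response bytes are all made up:

import json

from huggingface_hub.inference._generated.types import (
    TextGenerationInput,
    TextGenerationInputGenerateParameters,
    TextGenerationOutput,
)

request = TextGenerationInput(
    inputs="Once upon a time",
    parameters=TextGenerationInputGenerateParameters(
        max_new_tokens=50,
        temperature=0.7,
        stop=["\n\n"],
        details=True,
    ),
    stream=False,
)
body = json.dumps(request)  # dict subclass -> serializes directly

out = TextGenerationOutput.parse_obj_as_instance(b'{"generated_text": "Once upon a time, ..."}')
print(out.generated_text)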
class ZeroShotObjectDetectionParameters(BaseInferenceType):
    """Additional inference parameters for Zero Shot Object Detection"""

    candidate_labels: List[str]
    """The candidate labels for this image"""
250
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py
class ZeroShotObjectDetectionInput(BaseInferenceType):
    """Inputs for Zero Shot Object Detection inference"""

    inputs: str
    """The input image data as a base64-encoded string."""
    parameters: ZeroShotObjectDetectionParameters
    """Additional inference parameters for Zero Shot Object Detection"""
251
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py
class ZeroShotObjectDetectionBoundingBox(BaseInferenceType):
    """The predicted bounding box. Coordinates are relative to the top left corner of the
    input image.
    """

    xmax: int
    xmin: int
    ymax: int
    ymin: int
252
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py
class ZeroShotObjectDetectionOutputElement(BaseInferenceType):
    """Outputs of inference for the Zero Shot Object Detection task"""

    box: ZeroShotObjectDetectionBoundingBox
    """The predicted bounding box. Coordinates are relative to the top left corner of the
    input image.
    """
    label: str
    """A candidate label"""
    score: float
    """The associated score / probability"""
253
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py
class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "AutomaticSpeechRecognitionEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to a float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to a
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to a float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a
    target token next is to the expected conditional probability of predicting a random
    token next, given the partial text already generated. If set to a float < 1, the
    smallest set of the most locally typical tokens with probabilities that add up to
    typical_p or higher are kept for generation. See [this
    paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
254
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
class AutomaticSpeechRecognitionParameters(BaseInferenceType):
    """Additional inference parameters for Automatic Speech Recognition"""

    return_timestamps: Optional[bool] = None
    """Whether to output corresponding timestamps with the generated text"""
    # Will be deprecated in the future when the renaming to `generation_parameters` is
    # implemented in transformers
    generate_kwargs: Optional[AutomaticSpeechRecognitionGenerationParameters] = None
    """Parametrization of the text generation process"""
255
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
class AutomaticSpeechRecognitionInput(BaseInferenceType):
    """Inputs for Automatic Speech Recognition inference"""

    inputs: str
    """The input audio data as a base64-encoded string. If no `parameters` are provided, you
    can also provide the audio data as a raw bytes payload.
    """
    parameters: Optional[AutomaticSpeechRecognitionParameters] = None
    """Additional inference parameters for Automatic Speech Recognition"""
256
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
    text: str
    """A chunk of text identified by the model"""
    timestamps: List[float]
    """The start and end timestamps corresponding with the text"""
257
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
class AutomaticSpeechRecognitionOutput(BaseInferenceType):
    """Outputs of inference for the Automatic Speech Recognition task"""

    text: str
    """The recognized text."""
    chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None
    """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
    the model.
    """
258
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
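Parsing an ASR response with timestamped chunks (returned when return_timestamps is enabled): nested chunk dicts are converted to AutomaticSpeechRecognitionOutputChunk instances by the recursive parse_obj logic shown further down. The response bytes are made up:

from huggingface_hub.inference._generated.types import AutomaticSpeechRecognitionOutput

raw = (
    b'{"text": "hello world", '
    b'"chunks": [{"text": "hello", "timestamps": [0.0, 0.6]}, '
    b'{"text": "world", "timestamps": [0.7, 1.2]}]}'
)
out = AutomaticSpeechRecognitionOutput.parse_obj_as_instance(raw)
for chunk in out.chunks:
    print(chunk.text, chunk.timestamps)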
class SummarizationParameters(BaseInferenceType):
    """Additional inference parameters for summarization."""

    clean_up_tokenization_spaces: Optional[bool] = None
    """Whether to clean up the potential extra spaces in the text output."""
    generate_parameters: Optional[Dict[str, Any]] = None
    """Additional parametrization of the text generation algorithm."""
    truncation: Optional["SummarizationTruncationStrategy"] = None
    """The truncation strategy to use."""
259
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/summarization.py
class SummarizationInput(BaseInferenceType):
    """Inputs for Summarization inference"""

    inputs: str
    """The input text to summarize."""
    parameters: Optional[SummarizationParameters] = None
    """Additional inference parameters for summarization."""
260
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/summarization.py
class SummarizationOutput(BaseInferenceType):
    """Outputs of inference for the Summarization task"""

    summary_text: str
    """The summarized text."""
261
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/summarization.py
class ZeroShotImageClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Zero Shot Image Classification"""

    candidate_labels: List[str]
    """The candidate labels for this image"""
    hypothesis_template: Optional[str] = None
    """The sentence used in conjunction with `candidate_labels` to attempt the image
    classification by replacing the placeholder with the candidate labels.
    """
262
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py
class ZeroShotImageClassificationInput(BaseInferenceType):
    """Inputs for Zero Shot Image Classification inference"""

    inputs: str
    """The input image data to classify as a base64-encoded string."""
    parameters: ZeroShotImageClassificationParameters
    """Additional inference parameters for Zero Shot Image Classification"""
263
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py
class ZeroShotImageClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Zero Shot Image Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
264
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py
class ObjectDetectionParameters(BaseInferenceType):
    """Additional inference parameters for Object Detection"""

    threshold: Optional[float] = None
    """The probability necessary to make a prediction."""
265
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/object_detection.py
class ObjectDetectionInput(BaseInferenceType):
    """Inputs for Object Detection inference"""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you
    can also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ObjectDetectionParameters] = None
    """Additional inference parameters for Object Detection"""
266
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/object_detection.py
class ObjectDetectionBoundingBox(BaseInferenceType):
    """The predicted bounding box. Coordinates are relative to the top left corner of the
    input image.
    """

    xmax: int
    """The x-coordinate of the bottom-right corner of the bounding box."""
    xmin: int
    """The x-coordinate of the top-left corner of the bounding box."""
    ymax: int
    """The y-coordinate of the bottom-right corner of the bounding box."""
    ymin: int
    """The y-coordinate of the top-left corner of the bounding box."""
267
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/object_detection.py
class ObjectDetectionOutputElement(BaseInferenceType):
    """Outputs of inference for the Object Detection task"""

    box: ObjectDetectionBoundingBox
    """The predicted bounding box. Coordinates are relative to the top left corner of the
    input image.
    """
    label: str
    """The predicted label for the bounding box."""
    score: float
    """The associated score / probability."""
268
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/object_detection.py
class TableQuestionAnsweringInputData(BaseInferenceType):
    """One (table, question) pair to answer"""

    question: str
    """The question to be answered about the table"""
    table: Dict[str, List[str]]
    """The table to serve as context for the questions"""
269
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/table_question_answering.py
class TableQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Table Question Answering"""

    padding: Optional["Padding"] = None
    """Activates and controls padding."""
    sequential: Optional[bool] = None
    """Whether to do inference sequentially or as a batch. Batching is faster, but models like
    SQA require the inference to be done sequentially to extract relations within sequences,
    given their conversational nature.
    """
    truncation: Optional[bool] = None
    """Activates and controls truncation."""
270
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/table_question_answering.py
class TableQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Table Question Answering inference"""

    inputs: TableQuestionAnsweringInputData
    """One (table, question) pair to answer"""
    parameters: Optional[TableQuestionAnsweringParameters] = None
    """Additional inference parameters for Table Question Answering"""
271
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/table_question_answering.py
class TableQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Table Question Answering task"""

    answer: str
    """The answer of the question given the table. If there is an aggregator, the answer will
    be preceded by `AGGREGATOR >`.
    """
    cells: List[str]
    """List of strings made up of the answer cell values."""
    coordinates: List[List[int]]
    """Coordinates of the cells of the answers."""
    aggregator: Optional[str] = None
    """If the model has an aggregator, this returns the aggregator."""
272
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/table_question_answering.py
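The table is a column-oriented mapping (column name to that column's cell values), as the Dict[str, List[str]] hint shows. An illustrative payload:

from huggingface_hub.inference._generated.types import (
    TableQuestionAnsweringInput,
    TableQuestionAnsweringInputData,
)

payload = TableQuestionAnsweringInput(
    inputs=TableQuestionAnsweringInputData(
        question="How many goals did Ada score?",
        table={
            "Player": ["Ada", "Grace"],
            "Goals": ["3", "5"],
        },
    )
)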
class BaseInferenceType(dict):
    """Base class for all inference types.

    The object is both a dataclass and a dict for backward compatibility, but the plan is to
    remove the dict part in the future.

    Handle parsing from dict, list and json strings in a permissive way to ensure
    future-compatibility (e.g. all fields are made optional, and non-expected fields are
    added as dict attributes).
    """

    @classmethod
    def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:
        """Alias to parse server response and return a list of instances.

        See `parse_obj` for more details.
        """
        output = cls.parse_obj(data)
        if not isinstance(output, list):
            raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.")
        return output

    @classmethod
    def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:
        """Alias to parse server response and return a single instance.

        See `parse_obj` for more details.
        """
        output = cls.parse_obj(data)
        if isinstance(output, list):
            raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.")
        return output

    @classmethod
    def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:
        """Parse server response as a dataclass or list of dataclasses.

        To enable future-compatibility, we want to handle cases where the server returns more
        fields than expected. In such cases, we don't want to raise an error but still create
        the dataclass object. Remaining fields are added as dict attributes.
        """
        # Parse server response (from bytes)
        if isinstance(data, bytes):
            data = data.decode()
        if isinstance(data, str):
            data = json.loads(data)

        # If a list, parse each item individually
        if isinstance(data, List):
            return [cls.parse_obj(d) for d in data]  # type: ignore [misc]

        # At this point, we expect a dict
        if not isinstance(data, dict):
            raise ValueError(f"Invalid data type: {type(data)}")

        init_values = {}
        other_values = {}
        for key, value in data.items():
            key = normalize_key(key)
            if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init:
                if isinstance(value, dict) or isinstance(value, list):
                    field_type = cls.__dataclass_fields__[key].type
                    # if `field_type` is a `BaseInferenceType`, parse it
                    if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType):
                        value = field_type.parse_obj(value)
                    # otherwise, recursively parse nested dataclasses (if possible)
                    # `get_args` handles Union and Optional for us
                    else:
                        expected_types = get_args(field_type)
                        for expected_type in expected_types:
                            if getattr(expected_type, "_name", None) == "List":
                                expected_type = get_args(expected_type)[0]  # assume same type for all items in the list
                            if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType):
                                value = expected_type.parse_obj(value)
                                break
                init_values[key] = value
            else:
                other_values[key] = value

        # Make all missing fields default to None
        # => ensure that dataclass initialization will never fail even if the server does not
        #    return all fields.
        for key in cls.__dataclass_fields__:
            if key not in init_values:
                init_values[key] = None

        # Initialize dataclass with expected values
        item = cls(**init_values)

        # Add remaining fields as dict attributes
        item.update(other_values)
        return item

    def __post_init__(self):
        self.update(asdict(self))

    def __setitem__(self, __key: Any, __value: Any) -> None:
        # Hacky way to keep dataclass values in sync when dict is updated
        super().__setitem__(__key, __value)
        if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value:
            self.__setattr__(__key, __value)
        return

    def __setattr__(self, __name: str, __value: Any) -> None:
        # Hacky way to keep dict values in sync when dataclass is updated
        super().__setattr__(__name, __value)
        if self.get(__name) != __value:
            self[__name] = __value
        return
273
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/base.py
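The permissive parsing described above can be demonstrated directly: an unexpected server field does not break parsing, it simply stays reachable through the dict interface. The field name is made up:

from huggingface_hub.inference._generated.types import TranslationOutput

out = TranslationOutput.parse_obj_as_instance(
    '{"translation_text": "Hello", "model_version": "v2"}'  # extra field not in the dataclass
)
print(out.translation_text)  # dataclass attribute -> "Hello"
print(out["model_version"])  # unknown field kept as a dict entry -> "v2"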
class AudioToAudioInput(BaseInferenceType):
    """Inputs for Audio to Audio inference"""

    inputs: Any
    """The input audio data"""
274
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/audio_to_audio.py
class AudioToAudioOutputElement(BaseInferenceType):
    """Outputs of inference for the Audio To Audio task

    A generated audio file with its label.
    """

    blob: Any
    """The generated audio file."""
    content_type: str
    """The content type of audio file."""
    label: str
    """The label of the audio file."""
275
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/audio_to_audio.py
class TokenClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Token Classification"""

    aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None
    """The strategy used to fuse tokens based on model predictions"""
    ignore_labels: Optional[List[str]] = None
    """A list of labels to ignore"""
    stride: Optional[int] = None
    """The number of overlapping tokens between chunks when splitting the input text."""
276
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/token_classification.py
class TokenClassificationInput(BaseInferenceType):
    """Inputs for Token Classification inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[TokenClassificationParameters] = None
    """Additional inference parameters for Token Classification"""
277
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/token_classification.py
class TokenClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Token Classification task"""

    end: int
    """The character position in the input where this group ends."""
    score: float
    """The associated score / probability"""
    start: int
    """The character position in the input where this group begins."""
    word: str
    """The corresponding text"""
    entity: Optional[str] = None
    """The predicted label for a single token"""
    entity_group: Optional[str] = None
    """The predicted label for a group of one or more tokens"""
278
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/token_classification.py
class UserCommands(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens")
        login_parser.add_argument(
            "--token",
            type=str,
            help="Token generated from https://huggingface.co/settings/tokens",
        )
        login_parser.add_argument(
            "--add-to-git-credential",
            action="store_true",
            help="Optional: Save token to git credential helper.",
        )
        login_parser.set_defaults(func=lambda args: LoginCommand(args))

        whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
        whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))

        logout_parser = parser.add_parser("logout", help="Log out")
        logout_parser.add_argument(
            "--token-name",
            type=str,
            help="Optional: Name of the access token to log out from.",
        )
        logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
279
/Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/commands/user.py
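A sketch of how these subcommands get wired into a CLI, mirroring (as an assumption, not verbatim) the huggingface-cli entry point; BaseHuggingfaceCLICommand subclasses are assumed to expose a run() method:

from argparse import ArgumentParser

from huggingface_hub.commands.user import UserCommands

parser = ArgumentParser("huggingface-cli")
subparsers = parser.add_subparsers(help="huggingface-cli command helpers")
UserCommands.register_subcommand(subparsers)

args = parser.parse_args(["whoami"])
command = args.func(args)  # the set_defaults lambda instantiates WhoamiCommand(args)
command.run()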