@property
def num_labels(self) -> int:
"""
`int`: The number of labels for classification models.
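Example (a minimal sketch; `BertConfig` stands in for any concrete subclass):

```python
>>> config = BertConfig(num_labels=3)
>>> config.num_labels
3
>>> config.id2label
{0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
```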
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
@property
def _attn_implementation(self):
# This property is made private for now (it cannot be changed yet, and a PreTrainedModel.use_attn_implementation method still needs to be implemented).
if hasattr(self, "_attn_implementation_internal"):
if self._attn_implementation_internal is None:
# `config.attn_implementation` should never be None, for backward compatibility.
return "eager"
else:
return self._attn_implementation_internal
else:
return "eager"
@_attn_implementation.setter
def _attn_implementation(self, value):
self._attn_implementation_internal = value
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~PretrainedConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
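Example (a sketch; the directory path is illustrative):

```python
>>> config = BertConfig()
>>> config.save_pretrained("./my_model_directory/")  # writes config.json into the directory
>>> reloaded = BertConfig.from_pretrained("./my_model_directory/")
```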
"""
self._set_token_in_kwargs(kwargs)
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
non_default_generation_parameters = self._get_non_default_generation_parameters()
if len(non_default_generation_parameters) > 0:
# TODO (joao): this should be an exception if the user has modified the loaded config. See #33886
warnings.warn(
"Some non-default generation parameters are set in the model config. These should go into either a) "
"`model.generation_config` (as opposed to `model.config`); OR b) a GenerationConfig file "
"(https://huggingface.co/docs/transformers/generation_strategies#save-a-custom-decoding-strategy-with-your-model). "
"This warning will become an exception in the future."
f"\nNon-default generation parameters: {str(non_default_generation_parameters)}",
UserWarning,
)
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
# If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
self._upload_modified_files(
save_directory,
repo_id,
files_timestamps,
commit_message=commit_message,
token=kwargs.get("token"),
)
@staticmethod
def _set_token_in_kwargs(kwargs, token=None):
"""Temporary method to deal with `token` and `use_auth_token`.
This method avoids applying the same changes in all model config classes that override `from_pretrained`.
`use_auth_token` needs to be cleaned up in a follow-up PR.
"""
# Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.
if token is None:
token = kwargs.pop("token", None)
use_auth_token = kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError(
"`token` and `use_auth_token` are both specified. Please set only the argument `token`."
)
token = use_auth_token
if token is not None:
kwargs["token"] = token
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path: Union[str, os.PathLike],
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
local_files_only: bool = False,
token: Optional[Union[str, bool]] = None,
revision: str = "main",
**kwargs,
) -> "PretrainedConfig":
r"""
Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force (re-)downloading the configuration files and override the cached versions if
they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
subfolder (`str`, *optional*, defaults to `""`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
specify the folder name here.
kwargs (`Dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
# We can't directly instantiate the base class *PretrainedConfig*, so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained(
"google-bert/bert-base-uncased"
) # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained(
"./test/saved_model/"
) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
config = BertConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained(
"google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
)
assert config.output_attentions == True
assert unused_kwargs == {"foo": False}
```"""
kwargs["cache_dir"] = cache_dir
kwargs["force_download"] = force_download
kwargs["local_files_only"] = local_files_only
kwargs["revision"] = revision
cls._set_token_in_kwargs(kwargs, token)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if cls.base_config_key and cls.base_config_key in config_dict:
config_dict = config_dict[cls.base_config_key]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
# sometimes the config has no `base_config_key` if the config is used in several composite models
# e.g. LlamaConfig. In that case we try to see if there is a match in `model_type` before raising a warning
for k, v in config_dict.items():
if isinstance(v, dict) and v.get("model_type") == cls.model_type:
config_dict = v
# raise warning only if we still can't see a match in `model_type`
if config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
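Example (a sketch; the checkpoint name is illustrative):

```python
>>> config_dict, unused_kwargs = BertConfig.get_config_dict("google-bert/bert-base-uncased")
>>> config_dict["model_type"]
'bert'
```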
"""
cls._set_token_in_kwargs(kwargs)
original_kwargs = copy.deepcopy(kwargs)
# Get config dict associated with the base config file
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
if config_dict is None:
return {}, kwargs
if "_commit_hash" in config_dict:
original_kwargs["_commit_hash"] = config_dict["_commit_hash"]
# That config file may point us toward another config file to use.
if "configuration_files" in config_dict:
configuration_file = get_configuration_file(config_dict["configuration_files"])
config_dict, kwargs = cls._get_config_dict(
pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
)
return config_dict, kwargs
@classmethod
def _get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", None)
proxies = kwargs.pop("proxies", None)
token = kwargs.pop("token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
trust_remote_code = kwargs.pop("trust_remote_code", None)
subfolder = kwargs.pop("subfolder", "")
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
commit_hash = kwargs.pop("_commit_hash", None)
gguf_file = kwargs.get("gguf_file", None)
if trust_remote_code is True:
logger.warning(
"The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
" ignored."
)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
is_local = os.path.isdir(pretrained_model_name_or_path)
if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
# Special case when pretrained_model_name_or_path is a local file
resolved_config_file = pretrained_model_name_or_path
is_local = True
elif is_remote_url(pretrained_model_name_or_path):
configuration_file = pretrained_model_name_or_path if gguf_file is None else gguf_file
resolved_config_file = download_url(pretrained_model_name_or_path)
else:
configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME) if gguf_file is None else gguf_file
try:
# Load from local folder or from cache or download from model Hub and cache
resolved_config_file = cached_file(
pretrained_model_name_or_path,
configuration_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
token=token,
user_agent=user_agent,
revision=revision,
subfolder=subfolder,
_commit_hash=commit_hash,
)
if resolved_config_file is None:
return None, kwargs
commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
except EnvironmentError:
# Re-raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
# the original exception.
raise
except Exception:
# For any other exception, we throw a generic error.
raise EnvironmentError(
f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it"
" from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
f" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
f" containing a {configuration_file} file"
)
try:
if gguf_file:
config_dict = load_gguf_checkpoint(resolved_config_file, return_tensors=False)["config"]
else:
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
config_dict["_commit_hash"] = commit_hash
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if is_local:
logger.info(f"loading configuration file {resolved_config_file}")
else:
logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
if "auto_map" in config_dict and not is_local:
config_dict["auto_map"] = add_model_info_to_auto_map(
config_dict["auto_map"], pretrained_model_name_or_path
)
if "custom_pipelines" in config_dict and not is_local:
config_dict["custom_pipelines"] = add_model_info_to_custom_pipelines(
config_dict["custom_pipelines"], pretrained_model_name_or_path
)
# timm models are not saved with the model_type in the config file
if "model_type" not in config_dict and is_timm_config_dict(config_dict):
config_dict["model_type"] = "timm_wrapper"
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
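Example (a sketch pairing `from_dict` with `get_config_dict`):

```python
>>> config_dict, unused_kwargs = BertConfig.get_config_dict("google-bert/bert-base-uncased")
>>> config = BertConfig.from_dict(config_dict)
```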
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
# Those arguments may be passed along for our internal telemetry.
# We remove them so they don't appear in `return_unused_kwargs`.
kwargs.pop("_from_auto", None)
kwargs.pop("_from_pipeline", None)
# The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
kwargs["_commit_hash"] = config_dict["_commit_hash"]
# We remove it from kwargs so that it does not appear in `return_unused_kwargs`.
config_dict["attn_implementation"] = kwargs.pop("attn_implementation", None)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
# Update config with kwargs if needed
if "num_labels" in kwargs and "id2label" in kwargs:
num_labels = kwargs["num_labels"]
id2label = kwargs["id2label"] if kwargs["id2label"] is not None else []
if len(id2label) != num_labels:
raise ValueError(
f"You passed along `num_labels={num_labels}` with an incompatible id-to-label map: "
f"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove "
"one of them."
)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
current_attr = getattr(config, key)
# To authorize passing a custom subconfig as kwarg in models that have nested configs.
if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
value = current_attr.__class__(**value)
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def __iter__(self):
for attr in self.__dict__:
yield attr
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from the config that correspond to the default config attributes, for better
readability, and serializes it to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
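Example (a minimal sketch; only the non-default `hidden_dropout_prob` is expected in the diff):

```python
>>> config = BertConfig(hidden_dropout_prob=0.2)
>>> diff = config.to_diff_dict()
>>> diff["hidden_dropout_prob"]
0.2
```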
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
isinstance(getattr(self, key, None), PretrainedConfig)
and key in class_config_dict
and isinstance(class_config_dict[key], dict)
):
# For nested configs we need to clean the diff recursively
diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))
if "model_type" in value:
# Needs to be set even if it's not in the diff
diff["model_type"] = value["model_type"]
if len(diff) > 0:
serializable_config_dict[key] = diff
elif (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
if hasattr(self, "quantization_config"):
serializable_config_dict["quantization_config"] = (
self.quantization_config.to_dict()
if not isinstance(self.quantization_config, dict)
else self.quantization_config
)
# pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
_ = serializable_config_dict.pop("_pre_quantization_dtype", None)
self.dict_torch_dtype_to_str(serializable_config_dict)
if "_attn_implementation_internal" in serializable_config_dict:
del serializable_config_dict["_attn_implementation_internal"]
# Do not serialize `base_model_tp_plan` for now
if "base_model_tp_plan" in serializable_config_dict:
del serializable_config_dict["base_model_tp_plan"]
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
if "_auto_class" in output:
del output["_auto_class"]
if "_commit_hash" in output:
del output["_commit_hash"]
if "_attn_implementation_internal" in output:
del output["_attn_implementation_internal"]
# Do not serialize `base_model_tp_plan` for now
if "base_model_tp_plan" in output:
del output["base_model_tp_plan"]
# Transformers version when serializing the model
output["transformers_version"] = __version__
for key, value in output.items():
# Deal with nested configs like CLIP
if isinstance(value, PretrainedConfig):
value = value.to_dict()
del value["transformers_version"]
output[key] = value
if hasattr(self, "quantization_config"):
output["quantization_config"] = (
self.quantization_config.to_dict()
if not isinstance(self.quantization_config, dict)
else self.quantization_config
)
# pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
_ = output.pop("_pre_quantization_dtype", None)
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
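Example (a minimal sketch):

```python
>>> config = BertConfig()
>>> config.update({"hidden_dropout_prob": 0.2})
>>> config.hidden_dropout_prob
0.2
```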
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
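Example (a minimal sketch using GPT-2-style attribute names):

```python
>>> config = GPT2Config()
>>> config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
>>> (config.n_embd, config.resid_pdrop, config.scale_attn_weights)
(10, 0.2, False)
```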
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise TypeError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
*"float32"* string, which can then be stored in JSON format.
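Example (a minimal sketch; `config` is any `PretrainedConfig` instance, and the dict is modified in place):

```python
>>> import torch
>>> d = {"torch_dtype": torch.float32, "sub_config": {"torch_dtype": torch.bfloat16}}
>>> config.dict_torch_dtype_to_str(d)
>>> d
{'torch_dtype': 'float32', 'sub_config': {'torch_dtype': 'bfloat16'}}
```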
"""
if d.get("torch_dtype", None) is not None:
if isinstance(d["torch_dtype"], dict):
d["torch_dtype"] = {k: str(v).split(".")[-1] for k, v in d["torch_dtype"].items()}
elif not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self.dict_torch_dtype_to_str(value)
@classmethod
def register_for_auto_class(cls, auto_class="AutoConfig"):
"""
Register this class with a given auto class. This should only be used for custom configurations as the ones in
the library are already mapped with `AutoConfig`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
The auto class to register this new configuration with.
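Example (a sketch with a hypothetical custom configuration class):

```python
>>> class MyCustomConfig(PretrainedConfig):
...     model_type = "my_custom_model"
>>> MyCustomConfig.register_for_auto_class("AutoConfig")
```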
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
@staticmethod
def _get_global_generation_defaults() -> Dict[str, Any]:
return {
"max_length": 20,
"min_length": 0,
"do_sample": False,
"early_stopping": False,
"num_beams": 1,
"num_beam_groups": 1,
"diversity_penalty": 0.0,
"temperature": 1.0,
"top_k": 50,
"top_p": 1.0,
"typical_p": 1.0,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"no_repeat_ngram_size": 0,
"encoder_no_repeat_ngram_size": 0,
"bad_words_ids": None,
"num_return_sequences": 1,
"output_scores": False,
"return_dict_in_generate": False,
"forced_bos_token_id": None,
"forced_eos_token_id": None,
"remove_invalid_values": False,
"exponential_decay_length_penalty": None,
"suppress_tokens": None,
"begin_suppress_tokens": None,
}
def _get_non_default_generation_parameters(self) -> Dict[str, Any]:
"""
Gets the non-default generation parameters on the PretrainedConfig instance
"""
non_default_generation_parameters = {}
decoder_attribute_name = None
# Composite models don't have a default config, use their decoder config as a fallback for default values
# If no known pattern is matched, then `default_config = None` -> check against the global generation defaults
try:
default_config = self.__class__()
except ValueError:
decoder_config = self.get_text_config(decoder=True)
if decoder_config is not self:
default_config = decoder_config.__class__()
else:
default_config = None
# If it is a composite model, we want to check the subconfig that will be used for generation
self_decoder_config = self if decoder_attribute_name is None else getattr(self, decoder_attribute_name)
for parameter_name, default_global_value in self._get_global_generation_defaults().items():
if hasattr(self_decoder_config, parameter_name):
is_default_in_config = is_default_generation_value = None
parameter_value = getattr(self_decoder_config, parameter_name)
# Three cases in which it is okay for the model config to hold generation config parameters:
# 1. The parameter is set to `None`, effectively delegating its value to the generation config
if parameter_value is None:
continue
# 2. If we have a default config, then the instance should hold the same generation defaults
if default_config is not None:
is_default_in_config = parameter_value == getattr(default_config, parameter_name)
# 3. if we don't have a default config, then the instance should hold the global generation defaults
else:
is_default_generation_value = parameter_value == default_global_value
is_non_default = (is_default_in_config is False) or (
is_default_in_config is None and is_default_generation_value is False
)
if is_non_default:
non_default_generation_parameters[parameter_name] = getattr(self_decoder_config, parameter_name)
return non_default_generation_parameters
def get_text_config(self, decoder=False) -> "PretrainedConfig":
"""
Returns the config that is meant to be used with text IO. On most models, it is the original config instance
itself. On specific composite models, it is under a set of valid names.
If `decoder` is set to `True`, then only search for decoder config names.
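Example (a sketch; `LlavaConfig` is one composite config that nests a `text_config`):

```python
>>> config = LlavaConfig.from_pretrained("llava-hf/llava-1.5-7b-hf")
>>> text_config = config.get_text_config()  # returns the nested text config
```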
"""
decoder_possible_text_config_names = ("decoder", "generator", "text_config")
encoder_possible_text_config_names = ("text_encoder",)
if decoder:
possible_text_config_names = decoder_possible_text_config_names
else:
possible_text_config_names = encoder_possible_text_config_names + decoder_possible_text_config_names
valid_text_config_names = []
for text_config_name in possible_text_config_names:
if hasattr(self, text_config_name):
text_config = getattr(self, text_config_name, None)
if text_config is not None:
valid_text_config_names += [text_config_name]
if len(valid_text_config_names) > 1:
raise ValueError(
f"Multiple valid text configs were found in the model config: {valid_text_config_names}. In this "
"case, using `get_text_config()` would be ambiguous. Please specify the desired text config directly."
)
elif len(valid_text_config_names) == 1:
return getattr(self, valid_text_config_names[0])
return self
class FlashAttentionKwargs(TypedDict, total=False):
"""
Keyword arguments for Flash Attention with Compile.
Attributes:
cu_seq_lens_q (`torch.LongTensor`, *optional*):
Cumulative sequence lengths for the query state.
cu_seq_lens_k (`torch.LongTensor`, *optional*):
Cumulative sequence lengths for the key state.
max_length_q (`int`, *optional*):
Maximum sequence length for query state.
max_length_k (`int`, *optional*):
Maximum sequence length for key state.
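Example (a minimal sketch for two packed sequences of lengths 3 and 5):

```python
>>> import torch
>>> kwargs = FlashAttentionKwargs(
...     cu_seq_lens_q=torch.tensor([0, 3, 8]),
...     cu_seq_lens_k=torch.tensor([0, 3, 8]),
...     max_length_q=5,
...     max_length_k=5,
... )
```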
"""
cu_seq_lens_q: Optional[torch.LongTensor]
cu_seq_lens_k: Optional[torch.LongTensor]
max_length_q: Optional[int]
max_length_k: Optional[int]
@dataclass(frozen=True)
class SizeDict:
"""
Hashable dictionary to store image size information.
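Example (a minimal sketch):

```python
>>> size = SizeDict(height=224, width=224)
>>> size["height"]
224
```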
"""
height: int = None
width: int = None
longest_edge: int = None
shortest_edge: int = None
max_height: int = None
max_width: int = None
def __getitem__(self, key):
if hasattr(self, key):
return getattr(self, key)
raise KeyError(f"Key {key} not found in SizeDict.")
class BaseImageProcessorFast(BaseImageProcessor):
_transform_params = None
def _build_transforms(self, **kwargs) -> "Compose":
"""
Given the input settings, e.g. `do_resize`, build the image transforms.
"""
raise NotImplementedError
def _validate_params(self, **kwargs) -> None:
for k, v in kwargs.items():
if k not in self._transform_params:
raise ValueError(f"Invalid transform parameter {k}={v}.")
@functools.lru_cache(maxsize=1)
def get_transforms(self, **kwargs) -> "Compose":
self._validate_params(**kwargs)
return self._build_transforms(**kwargs)
def to_dict(self):
encoder_dict = super().to_dict()
encoder_dict.pop("_transform_params", None)
return encoder_dict
class ChannelDimension(ExplicitEnum):
FIRST = "channels_first"
LAST = "channels_last"
class AnnotationFormat(ExplicitEnum):
COCO_DETECTION = "coco_detection"
COCO_PANOPTIC = "coco_panoptic"
class AnnotionFormat(ExplicitEnum):
COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value
class ImageType(ExplicitEnum):
PIL = "pillow"
TORCH = "torch"
NUMPY = "numpy"
TENSORFLOW = "tensorflow"
JAX = "jax"
class ImageFeatureExtractionMixin:
"""
Mixin that contains utilities for preparing image features.
"""
def _ensure_format_supported(self, image):
if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
raise ValueError(
f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and "
"`torch.Tensor` are."
)
def to_pil_image(self, image, rescale=None):
"""
Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
needed.
Args:
image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
The image to convert to the PIL Image format.
rescale (`bool`, *optional*):
Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
default to `True` if the image type is a floating type, `False` otherwise.
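Example (a minimal sketch; a float array is rescaled and converted automatically):

```python
>>> import numpy as np
>>> mixin = ImageFeatureExtractionMixin()
>>> array = np.random.rand(3, 224, 224)  # float values, channels-first
>>> pil_image = mixin.to_pil_image(array)  # channels moved last, values scaled to 0-255
```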
"""
self._ensure_format_supported(image)
if is_torch_tensor(image):
image = image.numpy()
if isinstance(image, np.ndarray):
if rescale is None:
# rescale defaults to True when the array is of floating type.
rescale = isinstance(image.flat[0], np.floating)
# If the channel has been moved to the first dim, we put it back at the end.
if image.ndim == 3 and image.shape[0] in [1, 3]:
image = image.transpose(1, 2, 0)
if rescale:
image = image * 255
image = image.astype(np.uint8)
return PIL.Image.fromarray(image)
return image
def convert_rgb(self, image):
"""
Converts `PIL.Image.Image` to RGB format.
Args:
image (`PIL.Image.Image`):
The image to convert.
"""
self._ensure_format_supported(image)
if not isinstance(image, PIL.Image.Image):
return image
return image.convert("RGB")
def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray:
"""
Rescale a numpy image by the given scale amount.
"""
self._ensure_format_supported(image)
return image * scale
def to_numpy_array(self, image, rescale=None, channel_first=True):
"""
Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
dimension.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to convert to a NumPy array.
rescale (`bool`, *optional*):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
channel_first (`bool`, *optional*, defaults to `True`):
Whether or not to permute the dimensions of the image to put the channel dimension first.
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = np.array(image)
if is_torch_tensor(image):
image = image.numpy()
rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale
if rescale:
image = self.rescale(image.astype(np.float32), 1 / 255.0)
if channel_first and image.ndim == 3:
image = image.transpose(2, 0, 1)
return image
def expand_dims(self, image):
"""
Expands 2-dimensional `image` to 3 dimensions.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to expand.
"""
self._ensure_format_supported(image)
# Do nothing if PIL image
if isinstance(image, PIL.Image.Image):
return image
if is_torch_tensor(image):
image = image.unsqueeze(0)
else:
image = np.expand_dims(image, axis=0)
return image
def normalize(self, image, mean, std, rescale=False):
"""
Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
if it's a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to normalize.
mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
The mean (per channel) to use for normalization.
std (`List[float]` or `np.ndarray` or `torch.Tensor`):
The standard deviation (per channel) to use for normalization.
rescale (`bool`, *optional*, defaults to `False`):
Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
happen automatically.
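Example (a minimal sketch using the usual ImageNet statistics):

```python
>>> import numpy as np
>>> mixin = ImageFeatureExtractionMixin()
>>> image = np.random.rand(3, 224, 224).astype(np.float32)  # channels-first, already in [0, 1]
>>> normalized = mixin.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
```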
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = self.to_numpy_array(image, rescale=True)
# If the input image is a PIL image, it automatically gets rescaled. If it's another
# type it may need rescaling.
elif rescale:
if isinstance(image, np.ndarray):
image = self.rescale(image.astype(np.float32), 1 / 255.0)
elif is_torch_tensor(image):
image = self.rescale(image.float(), 1 / 255.0)
if isinstance(image, np.ndarray):
if not isinstance(mean, np.ndarray):
mean = np.array(mean).astype(image.dtype)
if not isinstance(std, np.ndarray):
std = np.array(std).astype(image.dtype)
elif is_torch_tensor(image):
import torch
if not isinstance(mean, torch.Tensor):
if isinstance(mean, np.ndarray):
mean = torch.from_numpy(mean)
else:
mean = torch.tensor(mean)
if not isinstance(std, torch.Tensor):
if isinstance(std, np.ndarray):
std = torch.from_numpy(std)
else:
std = torch.tensor(std)
if image.ndim == 3 and image.shape[0] in [1, 3]:
return (image - mean[:, None, None]) / std[:, None, None]
else:
return (image - mean) / std
def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
"""
Resizes `image`. Enforces conversion of input to PIL.Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`int` or `Tuple[int, int]`):
The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
matched to this.
If `size` is an int and `default_to_square` is `True`, then the image will be resized to (size, size). If
`size` is an int and `default_to_square` is `False`, then the smaller edge of the image will be matched to
this number, i.e., if height > width, the image will be rescaled to (size * height / width, size).
resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
The filter to use for resampling.
default_to_square (`bool`, *optional*, defaults to `True`):
How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
square (`size`,`size`). If set to `False`, will replicate
[`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
with support for resizing only the smallest edge and providing an optional `max_size`.
max_size (`int`, *optional*, defaults to `None`):
The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
greater than `max_size` after being resized according to `size`, then the image is resized again so
that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e. the smaller
edge may be shorter than `size`. Only used if `default_to_square` is `False`.
Returns:
image: A resized `PIL.Image.Image`.
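Example (a minimal sketch of the smallest-edge behavior):

```python
>>> from PIL import Image
>>> mixin = ImageFeatureExtractionMixin()
>>> image = Image.new("RGB", (640, 480))
>>> mixin.resize(image, size=256, default_to_square=False).size  # smaller edge matched to 256
(341, 256)
```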
"""
resample = resample if resample is not None else PILImageResampling.BILINEAR
self._ensure_format_supported(image)
if not isinstance(image, PIL.Image.Image):
image = self.to_pil_image(image)
if isinstance(size, list):
size = tuple(size)
if isinstance(size, int) or len(size) == 1:
if default_to_square:
size = (size, size) if isinstance(size, int) else (size[0], size[0])
else:
width, height = image.size
# specified size only for the smallest edge
short, long = (width, height) if width <= height else (height, width)
requested_new_short = size if isinstance(size, int) else size[0]
if short == requested_new_short:
return image
new_short, new_long = requested_new_short, int(requested_new_short * long / short)
if max_size is not None:
if max_size <= requested_new_short:
raise ValueError(
f"max_size = {max_size} must be strictly greater than the requested "
f"size for the smaller edge size = {size}"
)
if new_long > max_size:
new_short, new_long = int(max_size * new_short / new_long), max_size
size = (new_short, new_long) if width <= height else (new_long, new_short)
return image.resize(size, resample=resample)
def center_crop(self, image, size):
"""
Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
size given, it will be padded (so the returned result has the requested size).
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):
The image to resize.
size (`int` or `Tuple[int, int]`):
The size to which crop the image.
Returns:
new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,
height, width).
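Example (a minimal sketch):

```python
>>> import numpy as np
>>> mixin = ImageFeatureExtractionMixin()
>>> image = np.zeros((3, 256, 256), dtype=np.float32)
>>> mixin.center_crop(image, 224).shape
(3, 224, 224)
```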
"""
self._ensure_format_supported(image)
if not isinstance(size, tuple):
size = (size, size)
# PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
if is_torch_tensor(image) or isinstance(image, np.ndarray):
if image.ndim == 2:
image = self.expand_dims(image)
image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
else:
image_shape = (image.size[1], image.size[0])
top = (image_shape[0] - size[0]) // 2
bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
left = (image_shape[1] - size[1]) // 2
right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
# For PIL Images we have a method to crop directly.
if isinstance(image, PIL.Image.Image):
return image.crop((left, top, right, bottom))
# Check if image is in (n_channels, height, width) or (height, width, n_channels) format
channel_first = True if image.shape[0] in [1, 3] else False
# Transpose (height, width, n_channels) format images
if not channel_first:
if isinstance(image, np.ndarray):
image = image.transpose(2, 0, 1)
if is_torch_tensor(image):
image = image.permute(2, 0, 1)
# Check if cropped area is within image boundaries
if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
return image[..., top:bottom, left:right]
# Otherwise, we may need to pad if the image is too small. Oh joy...
new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
if isinstance(image, np.ndarray):
new_image = np.zeros_like(image, shape=new_shape)
elif is_torch_tensor(image):
new_image = image.new_zeros(new_shape)
top_pad = (new_shape[-2] - image_shape[0]) // 2
bottom_pad = top_pad + image_shape[0]
left_pad = (new_shape[-1] - image_shape[1]) // 2
right_pad = left_pad + image_shape[1]
new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
top += top_pad
bottom += top_pad
left += left_pad
right += left_pad
new_image = new_image[
..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
]
return new_image
def flip_channel_order(self, image):
"""
Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of
`image` to a NumPy array if it's a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should
be first.
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = self.to_numpy_array(image)
return image[::-1, :, :]
def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
"""
Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
counter clockwise around its centre.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before
rotating.
Returns:
image: A rotated `PIL.Image.Image`.
"""
resample = resample if resample is not None else PIL.Image.NEAREST
self._ensure_format_supported(image)
if not isinstance(image, PIL.Image.Image):
image = self.to_pil_image(image)
return image.rotate(
angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor
)
class Trie:
"""
Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass.
Loose reference: https://en.wikipedia.org/wiki/Trie
"""
def __init__(self, *args):
self.data = {}
self._tokens = set()
self._termination_char = ""
self.update(*args)
def update(self, *args):
"""
Updates the Trie with new tokens provided as arguments.
Args:
*args: Variable number of words to be added to the Trie.
"""
for token in tuple(*args):
self.add(token)
def add(self, word: str):
"""
Passes over every char (utf-8 char) in word and recursively adds it to the internal `data` trie representation.
The special key `""` in `self._termination_char` is used to represent termination.
This function is idempotent; adding the same word twice will leave the trie unchanged.
Example:
```python
>>> trie = Trie()
>>> trie.add("Hello ει")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"ε": {"ι": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"ε": {"ι": {"": 1}}}}}}}}}
```
"""
if not word:
# Prevent empty string
return
self._tokens.add(word)
ref = self.data
for char in word:
ref[char] = ref.setdefault(char, {})
ref = ref[char]
ref[self._termination_char] = 1
def split(self, text: str) -> List[str]:
"""
Will look for the words added to the trie within `text`. Output is the original string split along the
boundaries of the words found.
This trie will match the longest possible word first!
Example:
```python
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
```
"""
# indexes are counted left of the chars index.
# "hello", index 0, is left of h, index 1 is between h and e.
# index 5 is right of the "o".
# States are going to capture every possible start (indexes as above)
# as keys, and have as values, a pointer to the position in the trie
# where we're at. This is a partial match for now.
# This enables to keep track of multiple matches while we're iterating
# the string
# If the trie contains, "blowing", and "lower" and we encounter the
# string "blower", we need to split into ["b", "lower"].
# This is where we need to keep track of multiple possible starts.
states = OrderedDict()
# This will contain every index where we need
# to cut.
# We force a cut at offset 0 and len(text) (added later)
offsets = [0]
# This is used by the lookahead which needs to skip over
# some text where the full match exceeded the place in the initial
# for loop
skip = 0
# Main loop, giving this algorithm O(n) complexity
for current, current_char in enumerate(text):
if skip and current < skip:
# Prevents the lookahead for matching twice
# like extra_id_100 and id_100
continue
# This will track every state
# that stops matching; we need to stop tracking them.
# If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
# fail on "b", we need to remove 0 from the valid states.
to_remove = set()
# Whenever we find a match, we need to drop everything;
# this is a greedy algorithm that matches on the first found token
reset = False
# In this case, we already have partial matches (but unfinished)
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
# Lookahead to match longest first
# Important in case of extra_id_1 vs extra_id_100
# Here we are also actively looking for other earlier partial
# matches
# "[CLS]", "L", we need to match CLS even if L is special
for lookstart, looktrie_pointer in states.items():
if lookstart > start:
# This partial match is later, we can stop looking
break
elif lookstart < start:
# This partial match is earlier, the trie pointer
# was already updated, so index is + 1
lookahead_index = current + 1
end = current + 1
else:
# Here lookstart == start and
# looktrie_pointer == trie_pointer
# It wasn't updated yet so indices are current ones
lookahead_index = current
end = current
next_char = text[lookahead_index] if lookahead_index < len(text) else None
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
while next_char in looktrie_pointer:
looktrie_pointer = looktrie_pointer[next_char]
lookahead_index += 1
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
if lookahead_index == len(text):
# End of string
break
next_char = text[lookahead_index]
# End lookahead
# Storing and resetting
offsets.append(start)
offsets.append(end)
reset = True
break
                elif current_char in trie_pointer:
                    # The current character has a match within the trie:
                    # update the pointer (it will be stored back into
                    # `states` just below).
                    trie_pointer = trie_pointer[current_char]
                    # Storing back the new pointer into the states:
                    # partial matches got longer by one.
                    states[start] = trie_pointer
                else:
                    # The new character has no match in the trie: we need
                    # to stop keeping track of this partial match.
                    # We can't do it directly within the loop because of
                    # how Python dict iteration works.
                    to_remove.add(start)
            # Either clear all starts (we found a real match)
            # or clear only the partial matches that didn't pan out.
            if reset:
                states = {}
            else:
                for start in to_remove:
                    del states[start]

            # If this character is a starting character within the trie,
            # start keeping track of this partial match.
            if current >= skip and current_char in self.data:
                states[current] = self.data[current_char]
        # A final match may still be pending in `states` once we reach
        # the end of the string.
        for start, trie_pointer in states.items():
            if "" in trie_pointer:
                # This is a final match: we need to
                # store the results in `offsets`.
                end = len(text)
                offsets.append(start)
                offsets.append(end)
                # The longest match is always the one with the lowest start,
                # i.e. the first item, so we can break here.
                break

        return self.cut_text(text, offsets)
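A minimal usage sketch of `split`, assuming `Trie` is imported from `transformers.tokenization_utils` (the class defined in this file); the output mirrors the greedy, longest-match-first behavior described in the comments above:

```python
# Hedged sketch of Trie.split usage.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")

# The lookahead prefers the longest token: "extra_id_100" wins over "extra_id_1".
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']
```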
def cut_text(self, text, offsets):
        # We have all the offsets now; we just need to do the actual splitting.
        # We still need to account for the first part of the string and for
        # the final part, hence appending len(text) below.
offsets.append(len(text))
tokens = []
start = 0
for end in offsets:
            if start > end:
                logger.error(
                    "There was a bug in the Trie algorithm during tokenization. Attempting to recover. Please report"
                    " it anyway."
                )
                continue
            elif start == end:
                # This might happen if there's a match at index 0.
                # We're also preventing zero-width cuts in case of two
                # consecutive matches.
                continue
            tokens.append(text[start:end])
            start = end

        return tokens
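A small illustration of `cut_text` in isolation; the offsets `[0, 1, 6]` are what `split` would collect for `"blower"` when the trie contains `"lower"` (hypothetical input, a minimal sketch):

```python
# Hedged sketch: cut_text appends len(text) itself and skips zero-width cuts.
trie = Trie()
print(trie.cut_text("blower", [0, 1, 6]))
# -> ['b', 'lower']
```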
class ExtensionsTrie(Trie):
def __init__(self, *args):
super().__init__(*args)
def extensions(self, prefix: str):
"""
Generates all extensions of a given prefix token in the Trie.
Example:
```python
        >>> trie = ExtensionsTrie()
>>> trie.add("apple")
>>> trie.add("app")
>>> trie.add("application")
>>> trie.extensions("app")
['app', 'apple', 'application']
```
"""
prefix_node = self._get_node(prefix)
ret = self._collect_tokens(prefix_node)
return [prefix + token for token in ret]
def _get_node(self, token: str) -> dict:
"""
Retrieves the node corresponding to the given token in the Trie.
Args:
            token (str): The token for which the corresponding node needs to be retrieved.
Returns:
dict: The node in the Trie corresponding to the given token.
"""
        node = self.data
        for char in token:
            if char not in node:
                # The token is not fully contained in the trie: stop here and
                # return the deepest node reached (a silent partial match).
                break
            node = node[char]
        return node
def _collect_tokens(self, node: dict) -> list:
"""
Generates all tokens in the Trie starting from a given node.
Args:
node (dict): The node in the Trie from which tokens need to be generated.
Returns:
list: List of tokens generated from the given node.
"""
tokens = [self._termination_char] if self._termination_char in node else []
for token, subtrie_head in node.items():
if token != self._termination_char:
subtokens = self._collect_tokens(subtrie_head)
tokens.extend([token + subtoken for subtoken in subtokens])
        return tokens
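A quick sketch of how the two private helpers compose (the trie stores nested dicts, with the empty string as the termination marker that `_collect_tokens` checks):

```python
# Hedged sketch of the helpers above.
trie = ExtensionsTrie()
trie.add("app")
trie.add("apple")

node = trie._get_node("app")        # subtree reached after 'a' -> 'p' -> 'p'
print(trie._collect_tokens(node))   # -> ['', 'le']
print(trie.extensions("app"))       # -> ['app', 'apple']
```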
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
    Base class for all slow tokenizers.

    Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].

    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.

    This class also contains the added tokens in a unified way on top of all tokenizers, so we don't have to handle
    the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE,
    sentencepiece...).
    """
    def __init__(self, **kwargs):
        # 1. Init the tokens trie used to split text on added tokens
        self.tokens_trie = Trie()

        # 2. Init `_added_tokens_decoder` if the child class did not
        if not hasattr(self, "_added_tokens_decoder"):
            self._added_tokens_decoder: Dict[int, AddedToken] = {}

        # 3. If an `added_tokens_decoder` is passed, we are loading from a saved tokenizer: overwrite
        self._added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {}))
        self._added_tokens_encoder: Dict[str, int] = {k.content: v for v, k in self._added_tokens_decoder.items()}

        # 4. Init the parent class
        super().__init__(**kwargs)
        # 5. If some of the special tokens are not part of the vocab, we add them at the end.
        # The order of addition is the same as self.SPECIAL_TOKENS_ATTRIBUTES, following `tokenizers`
self._add_tokens(
[token for token in self.all_special_tokens_extended if token not in self._added_tokens_encoder],
special_tokens=True,
)
self._decode_use_source_tokenizer = False
@property
def is_fast(self) -> bool:
        return False
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
raise NotImplementedError
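A minimal sketch of what a concrete subclass provides for the pieces shown in this excerpt (`ToyTokenizer` and its vocab are hypothetical; a real slow tokenizer also implements `_tokenize`, `_convert_token_to_id`, `_convert_id_to_token`, and saving):

```python
# Hedged sketch, not a complete tokenizer.
class ToyTokenizer(PreTrainedTokenizer):
    def __init__(self, **kwargs):
        self._vocab = {"hello": 0, "world": 1}  # hypothetical base vocab
        super().__init__(**kwargs)

    @property
    def vocab_size(self) -> int:
        # Base vocabulary only, without added tokens.
        return len(self._vocab)

    def get_vocab(self):
        # Full mapping: base vocab plus added tokens.
        return {**self._vocab, **self.added_tokens_encoder}
```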
@property
def added_tokens_encoder(self) -> Dict[str, int]:
"""
Returns the sorted mapping from string to index. The added tokens encoder is cached for performance
optimisation in `self._added_tokens_encoder` for the slow tokenizers.
"""
return {k.content: v for v, k in sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])}
@property
def added_tokens_decoder(self) -> Dict[int, AddedToken]:
"""
Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.
        Returns:
            `Dict[int, AddedToken]`: The added tokens.
        """
        return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0]))
@added_tokens_decoder.setter
def added_tokens_decoder(self, value: Dict[int, Union[AddedToken, str]]) -> Dict[int, AddedToken]:
        # Raise an error on anything that is not a str or an AddedToken; plain
        # strings are wrapped in a default AddedToken, so users should define
        # the stripping/normalization behavior explicitly when it matters.
        for index, token in value.items():
            if not isinstance(token, (str, AddedToken)) or not isinstance(index, int):
                raise TypeError(
                    f"The provided `added_tokens_decoder` has an element of type ({index.__class__}, {token.__class__}), "
                    f"but it should be a dict of `int` to `Union[AddedToken, str]`"
                )

            self._added_tokens_decoder[index] = AddedToken(token) if isinstance(token, str) else token
            self._added_tokens_encoder[str(token)] = index
        self._update_total_vocab_size()
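A hedged usage sketch of this setter (`tokenizer` stands for any slow tokenizer instance, and the indices are hypothetical free slots past the base vocabulary):

```python
# Hedged sketch: AddedToken comes from the `tokenizers` package.
from tokenizers import AddedToken

tokenizer.added_tokens_decoder = {32000: AddedToken("<my_token>", lstrip=True, rstrip=False)}
tokenizer.added_tokens_decoder = {32001: "<plain_token>"}  # wrapped into AddedToken(...) above
```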
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from
the fast call because for now we always add the tokens even if they are already in the vocabulary. This is
something we should change.
Returns:
`Dict[str, int]`: The added tokens.
"""
return self._added_tokens_encoder
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.total_vocab_size
def _update_total_vocab_size(self):
"""
Update the size of the full vocabulary with the added tokens. Counts the `keys` and not the `values` because
otherwise if there is a hole in the vocab, we will add tokenizers at a wrong index. This operation is slow and
is only updated when adding tokens.
"""
self.total_vocab_size = len(self.get_vocab()) | 57 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py |
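To illustrate the "hole" issue mentioned above (hypothetical vocabulary):

```python
# Hedged illustration: with a sparse vocab, counting keys (3) differs from
# max(index) + 1 (101); using the key count avoids placing a new token at an
# index derived from a hole.
vocab = {"a": 0, "b": 1, "[RARE]": 100}  # hypothetical vocab with a hole
print(len(vocab))               # -> 3
print(max(vocab.values()) + 1)  # -> 101
```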
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
"""
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added
        to it with indices starting from the length of the current vocabulary. Special tokens are sometimes already in
        the vocab, which is why they have to be handled specifically.
Args:
            new_tokens (`List[str]` or `List[tokenizers.AddedToken]`):
                Token(s) to add to the vocabulary. A token is only counted as added if it's not already in the
                vocabulary (tested by checking whether the tokenizer assigns the index of the `unk_token` to it). If a
                token is already part of the vocabulary, we simply mark it as an `AddedToken`, which makes it possible
                to control the stripping and normalization of this token. This is NOT possible in `tokenizers`.
special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the tokens should be added as special tokens.
Returns:
`int`: The number of tokens actually added to the vocabulary.
        Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
        model = BertModel.from_pretrained("google-bert/bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
added_tokens = 0
if new_tokens is None:
return added_tokens
        # TODO: this is fairly slow and should be improved
current_vocab = self.get_vocab().copy()
new_idx = len(current_vocab) # only call this once, len gives the last index + 1
for token in new_tokens:
if not isinstance(token, (str, AddedToken)):
raise TypeError(f"Token {token} is not a string but a {type(token)}.")
if str(token) == "":
continue
if isinstance(token, str):
if token in self._added_tokens_encoder:
continue
                else: