| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Dict = logging.get_logger(__name__)
def _lowercase ( __A ):
'''simple docstring'''
if "resnet-50" in model_name:
__UpperCamelCase = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
__UpperCamelCase = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet-50 or resnet-101""" )
__UpperCamelCase = DetrConfig(use_timm_backbone=False ,backbone_config=backbone_config )
# set label attributes
__UpperCamelCase = """panoptic""" in model_name
if is_panoptic:
__UpperCamelCase = 250
else:
__UpperCamelCase = 91
__UpperCamelCase = """huggingface/label-files"""
__UpperCamelCase = """coco-detection-id2label.json"""
__UpperCamelCase = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
__UpperCamelCase = {int(k ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = state_dict.pop(__A )
state_dict[new] = val
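A minimal sketch of what `rename_key` above does (editorial illustration; the key names are taken from the stem renames earlier in the script, and the dict is a stand-in for a real state dict):

```python
# Illustrative only: pop the tensor under the original DETR key and
# re-insert it under the corresponding HuggingFace key.
state_dict = {"backbone.0.body.conv1.weight": "tensor-placeholder"}
val = state_dict.pop("backbone.0.body.conv1.weight")
state_dict["backbone.conv_encoder.model.embedder.embedder.convolution.weight"] = val
```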
def _lowercase ( __A ,__A=False ):
'''simple docstring'''
__UpperCamelCase = """"""
if is_panoptic:
__UpperCamelCase = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCamelCase = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__UpperCamelCase = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__UpperCamelCase = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__UpperCamelCase = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__UpperCamelCase = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
__UpperCamelCase = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
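For reference, the hard-coded slice boundaries above assume DETR's hidden size of 256: PyTorch's fused attention input projection stacks the query, key and value weights into a single `(3 * 256, 256)` matrix. A small self-contained sketch of the same split (editorial illustration, not part of the conversion script itself):

```python
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_w = in_proj_weight[:hidden_size, :]                   # rows 0..255   -> query
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]  # rows 256..511 -> key
v_w = in_proj_weight[-hidden_size:, :]                  # rows 512..767 -> value
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
```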
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCamelCase = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def _lowercase ( __A ,__A=None ,__A=False ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = get_detr_config(__A )
# load original model from torch hub
__UpperCamelCase = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f"Converting model {model_name}..." )
__UpperCamelCase = torch.hub.load("""facebookresearch/detr""" ,model_name_to_original_name[model_name] ,pretrained=True ).eval()
__UpperCamelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(config ):
if is_panoptic:
__UpperCamelCase = """detr.""" + src
rename_key(state_dict ,src ,dest )
# query, key and value matrices need special treatment
read_in_q_k_v(state_dict ,is_panoptic=is_panoptic )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCamelCase = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__UpperCamelCase = state_dict.pop(key )
state_dict["""detr.model""" + key[4:]] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCamelCase = state_dict.pop(key )
state_dict["""detr.""" + key] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__UpperCamelCase = state_dict.pop(key )
state_dict[prefix + key] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__UpperCamelCase = state_dict.pop(key )
state_dict[prefix + key] = val
# finally, create HuggingFace model and load state dict
__UpperCamelCase = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
model.load_state_dict(state_dict )
model.eval()
# verify our conversion on an image
__UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection"""
__UpperCamelCase = DetrImageProcessor(format=format )
__UpperCamelCase = processor(images=prepare_img() ,return_tensors="""pt""" )
__UpperCamelCase = encoding["""pixel_values"""]
__UpperCamelCase = detr(pixel_values )
__UpperCamelCase = model(pixel_values )
assert torch.allclose(outputs.logits ,original_outputs["""pred_logits"""] ,atol=1E-3 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["""pred_boxes"""] ,atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["""pred_masks"""] ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(f"nielsr/{model_name}" )
processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
a__ : Union[str, Any] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
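A hedged usage sketch of the entry point above, matching how the `__main__` block calls it; the output directory is a hypothetical placeholder, and the model name must be one of the two argparse choices:

```python
# Equivalent to: python <this script> --model_name detr-resnet-50 \
#                    --pytorch_dump_folder_path ./detr-resnet-50-hf
convert_detr_checkpoint(
    model_name="detr-resnet-50",
    pytorch_dump_folder_path="./detr-resnet-50-hf",  # placeholder path
    push_to_hub=False,
)
```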
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , token_ids_a , token_ids_b=None ) -> Tuple:
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def __lowerCamelCase ( self , token_ids_a , token_ids_b = None ) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
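To make the token-type mask built above concrete, here is a minimal sketch with hypothetical token ids (the special-token ids are placeholders, not SqueezeBERT's real vocabulary values): a sentence pair is masked as `[0]` over `[CLS] A [SEP]` and `[1]` over `B [SEP]`.

```python
token_ids_a = [7, 8, 9]  # hypothetical ids for sentence A
token_ids_b = [4, 5]     # hypothetical ids for sentence B
cls, sep = [101], [102]  # placeholder special-token ids
type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
assert type_ids == [0, 0, 0, 0, 0, 1, 1, 1]
```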
| 349 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = KandinskyImg2ImgPipeline
__SCREAMING_SNAKE_CASE = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
__SCREAMING_SNAKE_CASE = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
__SCREAMING_SNAKE_CASE = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__SCREAMING_SNAKE_CASE = False
@property
def __lowerCamelCase ( self ) -> Dict:
return 3_2
@property
def __lowerCamelCase ( self ) -> int:
return 3_2
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
return self.time_input_dim
@property
def __lowerCamelCase ( self ) -> Dict:
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ) -> Any:
return 1_0_0
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __lowerCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__UpperCamelCase = MultilingualCLIP(lowercase )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def __lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
__UpperCamelCase = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__UpperCamelCase = UNet2DConditionModel(**lowercase )
return model
@property
def __lowerCamelCase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__UpperCamelCase = DDIMScheduler(**lowercase )
__UpperCamelCase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __lowerCamelCase ( self , lowercase , lowercase=0 ) -> List[Any]:
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase ) ).to(lowercase )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowercase ) ).to(lowercase )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase = Image.fromarray(np.uint8(lowercase ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
if str(lowercase ).startswith("""mps""" ):
__UpperCamelCase = torch.manual_seed(lowercase )
else:
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__UpperCamelCase = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu"""
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__UpperCamelCase = """A red cartoon frog, 4k"""
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
__UpperCamelCase = KandinskyImg2ImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.float16 )
__UpperCamelCase = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__UpperCamelCase = pipeline(
lowercase , image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
__UpperCamelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase , lowercase )
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a__ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
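The `_LazyModule` indirection above means nothing heavy is imported until an attribute is actually accessed; a normal top-level import still works as usual (assuming `transformers` is installed):

```python
# Resolved lazily on first access via the _import_structure mapping above.
from transformers import TrOCRProcessor
```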
| 349 | 1 |
'''simple docstring'''
def _lowercase ( x_points ,y_points ,xa ):
'''simple docstring'''
n = len(x_points )
q = [[0] * n for i in range(n )]
for i in range(n ):
q[i][1] = y_points[i]
for i in range(2 ,n ):
for j in range(i ,n ):
q[j][i] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
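A quick sanity check of the Neville routine above: the sample points below lie on the line y = x + 5, so any interpolating polynomial through them evaluates to 10.0 at x = 5 (the second return value is the full Neville table).

```python
value, table = _lowercase([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
assert abs(value - 10.0) < 1e-9
```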
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a__ : Union[str, Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''albert'''
def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
__UpperCamelCase = vocab_size
__UpperCamelCase = embedding_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_hidden_groups
__UpperCamelCase = num_attention_heads
__UpperCamelCase = inner_group_num
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = position_embedding_type
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 | 1 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
a__ : List[str] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase = 1_0_1 ) -> Optional[Any]:
__UpperCamelCase = length
def __len__( self ) -> int:
return self.length
def __getitem__( self , lowercase ) -> int:
return i
class UpperCAmelCase__ :
def __call__( self , lowercase ) -> Any:
return {"input_ids": torch.tensor(lowercase ), "labels": torch.tensor(lowercase )}
class UpperCAmelCase__ ( nn.Module):
def __init__( self ) -> Any:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__UpperCamelCase = nn.Linear(1_2_0 , 8_0 )
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Dict:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class UpperCAmelCase__ ( UpperCAmelCase_):
@require_torch_neuroncore
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = f"--output_dir {output_dir}".split()
__UpperCamelCase = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class UpperCAmelCase__ ( UpperCAmelCase_):
@require_torch_multi_gpu
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = f"--output_dir {output_dir}".split()
__UpperCamelCase = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
a__ : Optional[int] = HfArgumentParser((TrainingArguments,))
a__ : int = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
a__ : str = DummyDataset(dataset_length)
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = list(range(len(dataset ) ) )
__UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
a__ : Optional[int] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
a__ : Union[str, Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a__ : Union[str, Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a__ : Union[str, Any] = 2
a__ : List[str] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a__ : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a__ : Optional[Any] = None
| 349 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowercase ( __A ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 )
xgb.fit(__A ,__A )
# Predict target for test data
__UpperCamelCase = xgb.predict(__A )
__UpperCamelCase = predictions.reshape(len(__A ) ,1 )
return predictions
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = fetch_california_housing()
__UpperCamelCase , __UpperCamelCase = data_handling(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split(
__A ,__A ,test_size=0.25 ,random_state=1 )
__UpperCamelCase = xgboost(__A ,__A ,__A )
# Error printing
print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" )
print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
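For reference, the two metrics printed by `main()` reduce to simple averages; a hand-rolled check with toy numbers (illustrative only, the script itself uses sklearn's implementations):

```python
import numpy as np

y_true = np.array([3.0, 2.5, 4.0])
y_pred = np.array([2.5, 3.0, 4.5])
mae = np.abs(y_true - y_pred).mean()   # mean absolute error -> 0.5
mse = ((y_true - y_pred) ** 2).mean()  # mean squared error  -> 0.25
assert np.isclose(mae, 0.5) and np.isclose(mse, 0.25)
```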
| 349 | 1 |
'''simple docstring'''
import re
def _lowercase ( __A ):
'''simple docstring'''
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )]
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
try:
__UpperCamelCase = split_input(__A )
if upper:
__UpperCamelCase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__UpperCamelCase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowercase ( __A ):
'''simple docstring'''
return to_simple_case(__A )
def _lowercase ( __A ):
'''simple docstring'''
try:
__UpperCamelCase = to_simple_case(__A )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""_""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""-""" )
if __name__ == "__main__":
__import__('doctest').testmod()
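Hedged examples of the helpers above, assuming the original helper names referenced inside the function bodies (`to_simple_case`, `to_complex_case`); the inputs are arbitrary:

```python
assert to_simple_case("hello world") == "HelloWorld"                 # PascalCase
assert to_complex_case("hello world", False, "_") == "hello_world"   # snake_case
assert to_complex_case("hello world", True, "-") == "HELLO-WORLD"    # uppercased kebab
```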
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder()
__UpperCamelCase = inputs_dict["""input_ids"""]
__UpperCamelCase = input_ids[:1, :]
__UpperCamelCase = inputs_dict["""attention_mask"""][:1, :]
__UpperCamelCase = inputs_dict["""head_mask"""]
__UpperCamelCase = 1
# first forward pass
__UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
__UpperCamelCase , __UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
__UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase = model(lowercase , attention_mask=lowercase )[0]
__UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.int8 )
if decoder_attention_mask is None:
__UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.int8 ),
] ,axis=-1 ,)
if head_mask is None:
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
__SCREAMING_SNAKE_CASE = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__SCREAMING_SNAKE_CASE = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__SCREAMING_SNAKE_CASE = '''google/pegasus-xsum'''
@cached_property
def __lowerCamelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
__UpperCamelCase = self.translate_src_text(**lowercase )
assert self.expected_text == generated_words
def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]:
__UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" )
__UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
__UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )
return generated_words
@slow
def __lowerCamelCase ( self ) -> Dict:
self._assert_generated_batch_equal_expected()
| 349 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def _lowercase ( __A ,__A ,__A ,__A=5 ):
'''simple docstring'''
assert masked_input.count("""<mask>""" ) == 1
__UpperCamelCase = torch.tensor(tokenizer.encode(masked_input ,add_special_tokens=True ) ).unsqueeze(0 ) # Batch size 1
__UpperCamelCase = model(__A )[0] # The last hidden-state is the first element of the output tuple
__UpperCamelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__UpperCamelCase = logits[0, masked_index, :]
__UpperCamelCase = logits.softmax(dim=0 )
__UpperCamelCase , __UpperCamelCase = prob.topk(k=__A ,dim=0 )
__UpperCamelCase = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__A ) )] )
__UpperCamelCase = tokenizer.mask_token
__UpperCamelCase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
__UpperCamelCase = predicted_token_bpe.replace("""\u2581""" ,""" """ )
if " {0}".format(__A ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(__A ) ,__A ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(masked_token ,predicted_token ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
a__ : Union[str, Any] = CamembertTokenizer.from_pretrained('camembert-base')
a__ : Dict = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
a__ : List[str] = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 349 |
'''simple docstring'''
import string
def _lowercase ( __A ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(symbol )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = input("""Encrypted message: """ )
__UpperCamelCase = message.upper()
decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
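A quick worked example of the brute-force loop above, assuming the `decrypt` name used at the call site: "WKLV" is "THIS" shifted by 3, so among the 26 candidates printed, the line for key 3 reads `Decryption using Key #3: THIS`.

```python
decrypt("WKLV")  # prints all 26 shifts; key 3 recovers "THIS"
```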
| 349 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowercase ( __A ,__A ,__A ,__A ,__A ,__A = None ,):
'''simple docstring'''
__UpperCamelCase = {}
if train_file is not None:
__UpperCamelCase = [train_file]
if eval_file is not None:
__UpperCamelCase = [eval_file]
if test_file is not None:
__UpperCamelCase = [test_file]
__UpperCamelCase = datasets.load_dataset("""csv""" ,data_files=files )
__UpperCamelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCamelCase = features_name.pop(__A )
__UpperCamelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCamelCase = {label: i for i, label in enumerate(label_list )}
__UpperCamelCase = tokenizer.model_input_names
__UpperCamelCase = {}
if len(features_name ) == 1:
for k in files.keys():
transformed_ds[k] = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
example[features_name[0]] ,truncation=True ,max_length=max_seq_length ,padding="""max_length""" ) ,batched=True ,)
elif len(features_name ) == 2:
for k in files.keys():
transformed_ds[k] = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) ,truncation=True ,max_length=max_seq_length ,padding="""max_length""" ,) ,batched=True ,)
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCamelCase = (
tf.data.Dataset.from_generator(
gen_train ,({k: tf.int32 for k in input_names}, tf.int64) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCamelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCamelCase = (
tf.data.Dataset.from_generator(
gen_val ,({k: tf.int32 for k in input_names}, tf.int64) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCamelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCamelCase = (
tf.data.Dataset.from_generator(
gen_test ,({k: tf.int32 for k in input_names}, tf.int64) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCamelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
a__ : List[str] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = field(metadata={'''help''': '''Which column contains the label'''})
__SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase_ , metadata={'''help''': '''The path of the training file'''})
__SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase_ , metadata={'''help''': '''The path of the development file'''})
__SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase_ , metadata={'''help''': '''The path of the test file'''})
__SCREAMING_SNAKE_CASE = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
__SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_tfds(
train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=__A ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,)
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(labelaid ) ,labelaid=labelaid ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task="""text-classification""" ,cache_dir=model_args.cache_dir ,)
with training_args.strategy.scope():
__UpperCamelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_pt=bool(""".bin""" in model_args.model_name_or_path ) ,config=__A ,cache_dir=model_args.cache_dir ,)
def compute_metrics(__A ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions ,axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCamelCase = TFTrainer(
model=__A ,args=__A ,train_dataset=__A ,eval_dataset=__A ,compute_metrics=__A ,)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir ,"""eval_results.txt""" )
with open(__A ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(__A )
return results
if __name__ == "__main__":
main()
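# --- Illustrative usage (an editorial addition, not part of the original script) ---
# A hypothetical command line for this script; the script file name and the CSV
# paths below are assumptions made for demonstration only:
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./tf_clf --do_train --do_eval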
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
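# --- Illustrative usage (an editorial addition, not part of the original file) ---
# A minimal sketch of driving this OnnxConfig through the (now legacy)
# transformers.onnx export helper; the checkpoint and output path are assumptions:
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, GPTJModel
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   model = GPTJModel.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("gptj.onnx"))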
| 349 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(lowercase , lowercase )
__UpperCamelCase , __UpperCamelCase = pt_inputs["""input_ids"""].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(lowercase ).eval()
__UpperCamelCase = model_class(lowercase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**lowercase ).to_tuple()
__UpperCamelCase = fx_model(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowercase , lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase )
__UpperCamelCase = model_class.from_pretrained(lowercase , from_pt=lowercase )
__UpperCamelCase = fx_model_loaded(**lowercase ).to_tuple()
self.assertEqual(
len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowercase , lowercase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(lowercase , lowercase )
__UpperCamelCase = pt_model_class(lowercase ).eval()
__UpperCamelCase = model_class(lowercase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(lowercase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs["""input_ids"""].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**lowercase ).to_tuple()
__UpperCamelCase = fx_model(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowercase , lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase )
__UpperCamelCase = pt_model_class.from_pretrained(lowercase , from_flax=lowercase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**lowercase ).to_tuple()
self.assertEqual(
len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowercase , lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
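# --- Illustrative note (an editorial addition, not part of the original file) ---
# With _LazyModule, submodules are imported only on first attribute access, e.g.:
#
#   from transformers.models.layoutlmv3 import LayoutLMv3Config
#   config = LayoutLMv3Config()  # the configuration submodule is loaded lazily here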
| 349 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a__ : Tuple = logging.get_logger(__name__)
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 2_5_5 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ) -> None:
super().__init__(**lowercase )
__UpperCamelCase = size if size is not None else {"""shortest_edge""": 2_5_6}
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
__UpperCamelCase = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__UpperCamelCase = get_size_dict(lowercase , param_name="""crop_size""" )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__UpperCamelCase = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
__UpperCamelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase ) -> np.ndarray:
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> Optional[int]:
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(lowercase , param_name="""crop_size""" )
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(lowercase ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
__UpperCamelCase = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Optional[int]:
__UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowercase ):
__UpperCamelCase = target_sizes.numpy()
__UpperCamelCase = []
for idx in range(len(lowercase ) ):
__UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase )
__UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
__UpperCamelCase = logits.argmax(dim=1 )
__UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
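# --- Illustrative sketch (an editorial addition, not part of the original file) ---
# The preprocess() pipeline above is resize -> center-crop -> rescale -> normalize.
# A minimal numpy-only rendition of the last two steps for an HWC image; the helper
# name and defaults here are assumptions made for demonstration:
def _demo_rescale_and_normalize(image, mean=IMAGENET_STANDARD_MEAN, std=IMAGENET_STANDARD_STD, scale=1 / 255):
    image = np.asarray(image, dtype=np.float32) * scale  # rescale pixel values to [0, 1]
    return (image - np.array(mean)) / np.array(std)  # per-channel normalization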
| 349 |
'''simple docstring'''
def is_sum_subset(arr: list, required_sum: int) -> bool:
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
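    # Illustrative checks (an editorial addition, not from the original file):
    # {3, 34, 4, 12, 5, 2} has a subset summing to 9 (4 + 5) but none summing to 30.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False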
| 349 | 1 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" ,[
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" ,num_bytes=1_337 ,num_examples=42 ,dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" ,num_bytes=1_337 ,num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] ,)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 349 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put the zip extractor last, because other archive types (e.g. tar or gzip) can be wrongly detected as zip
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
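# --- Illustrative usage (an editorial addition, not part of the original file) ---
# How the pieces above fit together: infer the format from the file's magic
# number, then dispatch to the matching extractor. The paths are assumptions:
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")  # -> "gzip"
#   Extractor.extract("archive.tar.gz", "unpacked_file", extractor_format=fmt)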
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: int) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
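    # Illustrative sanity checks (an editorial addition, not from the original file):
    # $1000 at 0.5% per day for 10 days earns $50 of simple interest.
    assert simple_interest(1000, 0.005, 10) == 50.0
    # One compounding period at 5% on $1000 earns ~$50 (compared with a tolerance
    # to avoid floating-point equality issues).
    assert abs(compound_interest(1000, 0.05, 1) - 50.0) < 1e-9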
| 349 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 | 1 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    '''simple docstring'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
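    # Illustrative check (an editorial addition): the recurrence first yields the
    # almost-equilateral triangles (5, 5, 6) and (17, 17, 16), with perimeters
    # 16 and 50, so solution(100) == 16 + 50 == 66.
    assert solution(100) == 66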
| 349 |
'''simple docstring'''
import pytest
a__ : List[str] = '__dummy_dataset1__'
a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
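# --- Illustrative consumer (an editorial addition, not from the original conftest) ---
# A hypothetical test that would receive the fixtures above:
#
#   def test_dataset_script_is_written(dataset_loading_script_dir, dataset_loading_script_name):
#       import os
#       script = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
#       assert os.path.isfile(script)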
| 349 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
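# The padding checks above run under torchrun: test_pad_across_processes launches this
# very file (via inspect.getfile). A direct invocation sketch, assuming >= 2 GPUs:
#
#   torchrun --nproc_per_node=2 <this file>
#
# Each rank builds a tensor whose first dimension depends on its rank, so
# `pad_across_processes` must pad every rank up to the largest one with zeros.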
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
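# Sketch of the label file this helper expects (an assumption based on the parsing
# above: one entry per line, the first whitespace-separated token is kept, keyed by
# the zero-based line number):
#
#   down
#   up
#
# -> {0: "down", 1: "up"}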
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
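# Example of the fairseq naming this parser handles (illustrative, not exhaustive):
#   "conv_layers.0.0.weight" -> layer_id=0, type_id=0 -> conv weight of feature layer 0
#   "conv_layers.0.2.weight" -> layer_id=0, type_id=2 -> (group/layer) norm weight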
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
)
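# Example invocation (paths are placeholders; the flags come from the parser above):
#
#   python <this script> \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned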
| 349 | 1 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """
    Count the reversible numbers of the given length, filling in digit pairs from
    the outside in and tracking the parity of the running carry (Project Euler 145).
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result
def solution(max_power: int = 9) -> int:
    """Counts all reversible numbers with at most max_power digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
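# Quick cross-check against a brute force for small lengths (Project Euler 145 states
# there are 120 reversible numbers below one thousand):
#
# def brute_force(limit: int) -> int:
#     return sum(
#         1
#         for n in range(1, limit)
#         if n % 10 != 0 and all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))
#     )
#
# assert brute_force(1000) == solution(3) == 120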
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 349 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ : Optional[int] = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
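# Worked example (illustrative numbers): for a 480x640 (h, w) input, output_size=(384, 384),
# keep_aspect_ratio=True and multiple=32, the width scale 0.6 deviates more from 1 than the
# height scale 0.8, so the height scale is used for both axes:
#   new_height = constraint_to_multiple_of(0.8 * 480, multiple=32) = 384
#   new_width  = constraint_to_multiple_of(0.8 * 640, multiple=32) = 512
# i.e. get_resize_output_image_size(image, (384, 384), True, 32) == (384, 512)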
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two points."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    For each vector in value_array, find the nearest vector in dataset (by Euclidean
    distance) and return [nearest_vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
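# Example (a sketch; shapes follow the checks in similarity_search above):
#
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value_array = np.array([[0, 1]])
#   similarity_search(dataset, value_array)                  # -> [[[0, 0], 1.0]]
#   cosine_similarity(np.array([1, 0]), np.array([1, 1]))    # -> ~0.7071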
| 349 | 1 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Manhattan distance between two points in the same n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of manhattan_distance."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
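# Examples (points must be same-length lists, per the validation above):
#
#   manhattan_distance([1, 1], [2, 2])               # -> 2.0
#   manhattan_distance_one_liner([1.5, 2], [3, 4])   # -> 3.5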
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 1 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Returns the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """
    Project Euler 34: sum of all numbers that equal the sum of the
    factorials of their digits.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'''{solution() = }''')
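# Worked example: 145 is such a "digit factorial" number, since
#   1! + 4! + 5! = 1 + 24 + 120 = 145
# The search bound 7 * 9! + 1 works because an 8-digit number's digit-factorial sum is
# at most 8 * 9! = 2903040, which has only 7 digits, so no 8-digit candidate can match.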
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
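# Usage sketch (hypothetical test class; applies each decorator to every method whose
# name starts with "test"):
#
# @for_all_test_methods(slow, require_faiss)
# class MyIndexTests(unittest.TestCase):
#     def test_build_index(self):
#         ...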
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 349 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if x is a torch.Tensor, tf.Tensor, jax tensor or np.ndarray."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
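# Example:
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   # -> {"a": 1, "b.c": 2, "b.d.e": 3}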
@contextmanager
def _lowercase ( __A ,__A = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowercase ( __A ,__A=None ):
'''simple docstring'''
if is_numpy_array(__A ):
return np.transpose(__A ,axes=__A )
elif is_torch_tensor(__A ):
return array.T if axes is None else array.permute(*__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.transpose(__A ,perm=__A )
elif is_jax_tensor(__A ):
return jnp.transpose(__A ,axes=__A )
else:
raise ValueError(f"Type not supported for transpose: {type(__A )}." )
def _lowercase ( __A ,__A ):
'''simple docstring'''
if is_numpy_array(__A ):
return np.reshape(__A ,__A )
elif is_torch_tensor(__A ):
return array.reshape(*__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.reshape(__A ,__A )
elif is_jax_tensor(__A ):
return jnp.reshape(__A ,__A )
else:
raise ValueError(f"Type not supported for reshape: {type(__A )}." )
def _lowercase ( __A ,__A=None ):
'''simple docstring'''
if is_numpy_array(__A ):
return np.squeeze(__A ,axis=__A )
elif is_torch_tensor(__A ):
return array.squeeze() if axis is None else array.squeeze(dim=__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.squeeze(__A ,axis=__A )
elif is_jax_tensor(__A ):
return jnp.squeeze(__A ,axis=__A )
else:
raise ValueError(f"Type not supported for squeeze: {type(__A )}." )
def _lowercase ( __A ,__A ):
'''simple docstring'''
if is_numpy_array(__A ):
return np.expand_dims(__A ,__A )
elif is_torch_tensor(__A ):
return array.unsqueeze(dim=__A )
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.expand_dims(__A ,axis=__A )
elif is_jax_tensor(__A ):
return jnp.expand_dims(__A ,axis=__A )
else:
raise ValueError(f"Type not supported for expand_dims: {type(__A )}." )
def _lowercase ( __A ):
'''simple docstring'''
if is_numpy_array(__A ):
return np.size(__A )
elif is_torch_tensor(__A ):
return array.numel()
elif is_tf_tensor(__A ):
import tensorflow as tf
return tf.size(__A )
elif is_jax_tensor(__A ):
return array.size
else:
raise ValueError(f"Type not supported for expand_dims: {type(__A )}." )
def _lowercase ( __A ,__A ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(__A ,(tuple, list) ):
__UpperCamelCase = [f"{repo_id}--{v}" if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
__UpperCamelCase = f"{repo_id}--{value}"
return auto_map
def _lowercase ( __A ):
'''simple docstring'''
for base_class in inspect.getmro(__A ):
__UpperCamelCase = base_class.__module__
__UpperCamelCase = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"Could not infer framework from class {model_class}." )
| 349 |
'''simple docstring'''
import re
def _lowercase ( __A ):
'''simple docstring'''
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )]
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
try:
__UpperCamelCase = split_input(__A )
if upper:
__UpperCamelCase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__UpperCamelCase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowercase ( __A ):
'''simple docstring'''
return to_simple_case(__A )
def _lowercase ( __A ):
'''simple docstring'''
try:
__UpperCamelCase = to_simple_case(__A )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""_""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""-""" )
if __name__ == "__main__":
__import__('doctest').testmod()
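# Expected behaviour of the converters above (assumed original names:
# to_pascal_case / to_camel_case / to_snake_case / to_kebab_case):
#
#   to_pascal_case("hello world")        -> "HelloWorld"
#   to_camel_case("hello world")         -> "helloWorld"
#   to_snake_case("hello world", False)  -> "hello_world"
#   to_kebab_case("hello world", True)   -> "HELLO-WORLD"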
| 349 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''WhisperFeatureExtractor'''
__SCREAMING_SNAKE_CASE = '''WhisperTokenizer'''
def __init__( self , lowercase , lowercase ) -> Optional[Any]:
super().__init__(lowercase , lowercase )
__UpperCamelCase = self.feature_extractor
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
return self.tokenizer.get_decoder_prompt_ids(task=lowercase , language=lowercase , no_timestamps=lowercase )
def __call__( self , *lowercase , **lowercase ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase )
__UpperCamelCase = kwargs.pop("""audio""" , lowercase )
__UpperCamelCase = kwargs.pop("""sampling_rate""" , lowercase )
__UpperCamelCase = kwargs.pop("""text""" , lowercase )
if len(lowercase ) > 0:
__UpperCamelCase = args[0]
__UpperCamelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__UpperCamelCase = self.feature_extractor(lowercase , *lowercase , sampling_rate=lowercase , **lowercase )
if text is not None:
__UpperCamelCase = self.tokenizer(lowercase , **lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__UpperCamelCase = encodings["""input_ids"""]
return inputs
def __lowerCamelCase ( self , *lowercase , **lowercase ) -> int:
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def __lowerCamelCase ( self , *lowercase , **lowercase ) -> Tuple:
return self.tokenizer.decode(*lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase="np" ) -> Union[str, Any]:
return self.tokenizer.get_prompt_ids(lowercase , return_tensors=lowercase )
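# Hedged usage sketch of the processor above (the checkpoint id is an
# assumption; any Whisper checkpoint with a processor config behaves alike):
#
#   from transformers import WhisperProcessor
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcript").input_ids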
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = 1
__UpperCamelCase = 3
__UpperCamelCase = (3_2, 3_2)
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase )
return image
@property
def __lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
return model
@property
def __lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowercase )
@property
def __lowerCamelCase ( self ) -> Tuple:
def extract(*lowercase , **lowercase ):
class UpperCAmelCase__ :
def __init__( self ) -> Tuple:
__UpperCamelCase = torch.ones([0] )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
self.pixel_values.to(lowercase )
return self
return Out()
return extract
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # assemble the pipeline with the DDIM scheduler configured above
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
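# Hedged end-to-end sketch of the safe pipeline exercised above; the SLD
# keyword names are taken from the tests, the exact values are illustrative:
#
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe(prompt, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7,
#                sld_threshold=0.025, sld_momentum_scale=0.5,
#                sld_mom_beta=0.7).images[0]
#
# Passing sld_guidance_scale=0 disables safe latent diffusion entirely.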
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase=2 , lowercase=3 , lowercase=4 , lowercase=2 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_6 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.02 , lowercase=6 , lowercase=6 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=1_0_0_0 , ) -> Tuple:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = coordinate_size
__UpperCamelCase = shape_size
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
__UpperCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__UpperCamelCase = text_seq_length
__UpperCamelCase = (image_size // patch_size) ** 2 + 1
__UpperCamelCase = self.text_seq_length + self.image_seq_length
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__UpperCamelCase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCamelCase = bbox[i, j, 3]
__UpperCamelCase = bbox[i, j, 1]
__UpperCamelCase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCamelCase = bbox[i, j, 2]
__UpperCamelCase = bbox[i, j, 0]
__UpperCamelCase = tmp_coordinate
__UpperCamelCase = tf.constant(lowercase )
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__UpperCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFLayoutLMvaModel(config=lowercase )
# text + image
__UpperCamelCase = model(lowercase , pixel_values=lowercase , training=lowercase )
__UpperCamelCase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , training=lowercase , )
__UpperCamelCase = model(lowercase , bbox=lowercase , pixel_values=lowercase , training=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__UpperCamelCase = model(lowercase , training=lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__UpperCamelCase = model({"""pixel_values""": pixel_values} , training=lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFLayoutLMvaForSequenceClassification(config=lowercase )
__UpperCamelCase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , training=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFLayoutLMvaForTokenClassification(config=lowercase )
__UpperCamelCase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , training=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2
__UpperCamelCase = TFLayoutLMvaForQuestionAnswering(config=lowercase )
__UpperCamelCase = model(
lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , training=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
return True
def __lowerCamelCase ( self , lowercase , lowercase , lowercase=False ) -> dict:
__UpperCamelCase = copy.deepcopy(lowercase )
if model_class in get_values(lowercase ):
__UpperCamelCase = {
k: tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowercase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase ):
__UpperCamelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowercase ):
__UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowercase ):
__UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowercase ):
__UpperCamelCase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = TFLayoutLMvaModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , hidden_size=3_7 )
def __lowerCamelCase ( self ) -> int:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(lowercase )
if getattr(lowercase , """hf_compute_loss""" , lowercase ):
# The number of elements in the loss should be the same as the number of elements in the label
__UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
__UpperCamelCase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase )[0]
]
__UpperCamelCase = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
__UpperCamelCase = prepared_for_class.pop("""input_ids""" )
__UpperCamelCase = model(lowercase , **lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
__UpperCamelCase = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
__UpperCamelCase = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__UpperCamelCase = -1_0_0
__UpperCamelCase = tf.convert_to_tensor(lowercase )
__UpperCamelCase = model(lowercase , **lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
__UpperCamelCase = model(lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
# Get keys that were added with the _prepare_for_class function
__UpperCamelCase = prepared_for_class.keys() - inputs_dict.keys()
__UpperCamelCase = inspect.signature(model.call ).parameters
__UpperCamelCase = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__UpperCamelCase = {0: """input_ids"""}
for label_key in label_keys:
__UpperCamelCase = signature_names.index(lowercase )
__UpperCamelCase = label_key
__UpperCamelCase = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__UpperCamelCase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__UpperCamelCase = prepared_for_class[value]
__UpperCamelCase = tuple(lowercase )
# Send to model
__UpperCamelCase = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __lowerCamelCase ( self ) -> Any:
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> str:
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase = type
self.model_tester.create_and_check_model(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> int:
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> str:
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
@slow
def __lowerCamelCase ( self ) -> Any:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFLayoutLMvaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@cached_property
def __lowerCamelCase ( self ) -> Union[str, Any]:
return LayoutLMvaImageProcessor(apply_ocr=lowercase ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=lowercase , return_tensors="""tf""" ).pixel_values
__UpperCamelCase = tf.constant([[1, 2]] )
__UpperCamelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__UpperCamelCase = model(input_ids=lowercase , bbox=lowercase , pixel_values=lowercase , training=lowercase )
# verify the logits
__UpperCamelCase = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , lowercase )
__UpperCamelCase = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ) )
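# Hedged note: outside the test harness the `input_ids`/`bbox` pair above is
# usually produced by LayoutLMv3's processor rather than built by hand; with
# the image processor's default apply_ocr=True, the words and boxes come from
# the bundled OCR step instead.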
| 349 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
| 349 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = ArgumentParser("""Diffusers CLI tool""" ,usage="""diffusers-cli <command> [<args>]""" )
__UpperCamelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__A )
# Let's go
__UpperCamelCase = parser.parse_args()
if not hasattr(__A ,"""func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCamelCase = args.func(__A )
service.run()
if __name__ == "__main__":
main()
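# Typical invocation (the `env` subcommand is the one registered above by
# EnvironmentCommand):
#
#   $ diffusers-cli env
#
# which prints platform and library versions for bug reports.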
| 349 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCAmelCase__ ( logging.LoggerAdapter):
@staticmethod
def __lowerCamelCase ( lowercase ) -> Dict:
__UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
__UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase )
__UpperCamelCase = kwargs.pop("""in_order""" , lowercase )
if self.isEnabledFor(lowercase ):
if self._should_log(lowercase ):
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
elif in_order:
__UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
state.wait_for_everyone()
def _lowercase ( __A ,__A = None ):
'''simple docstring'''
if log_level is None:
__UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A )
__UpperCamelCase = logging.getLogger(__A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__A ,{} )
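# Hedged usage sketch of the adapter above (API as in accelerate.logging):
#
#   from accelerate.logging import get_logger
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed on the main process only", main_process_only=True)
#   logger.debug("printed once per rank, in rank order", main_process_only=False, in_order=True)
#
# The main_process_only / in_order kwargs are consumed by the adapter's log
# override before the record reaches the underlying stdlib logger.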
| 349 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=__A )
__UpperCamelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=__A )
env_command_parser(subparsers=__A )
launch_command_parser(subparsers=__A )
tpu_command_parser(subparsers=__A )
test_command_parser(subparsers=__A )
# Let's go
__UpperCamelCase = parser.parse_args()
if not hasattr(__A ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(__A )
if __name__ == "__main__":
main()
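# The registered subcommands give the usual CLI surface, e.g.:
#
#   $ accelerate config            # interactive setup wizard
#   $ accelerate env               # environment report
#   $ accelerate launch train.py   # run a script with the saved config
#   $ accelerate test              # sanity-check the current config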
| 349 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ : Optional[Any] = logging.getLogger(__name__)
class UpperCAmelCase__ :
def __init__( self ) -> Any:
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str:
if not self.initialized:
__UpperCamelCase = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = True
def __lowerCamelCase ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]:
if index is not None and index.is_initialized() and len(lowercase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ) -> Dict:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) )
else:
__UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any:
return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int:
__UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase )
__UpperCamelCase = rag_tokenizer.question_encoder
__UpperCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase = """custom"""
__UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase )
else:
__UpperCamelCase = cls._build_index(lowercase )
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
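# Hedged wiring sketch for the distributed retriever above. The original
# class names (RayRetriever for the actor class defined first in this file,
# RagRayDistributedRetriever for the retriever) and the actor_handles
# keyword are assumptions taken from the transformers RAG research project:
#
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )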
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a__ : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
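# Hedged usage sketch (checkpoint ids come from the maps above):
#
#   from transformers import SqueezeBertTokenizerFast
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("hello world")               # wraps ids in [CLS] ... [SEP]
#   pair = tok("a question", "a context")  # token_type_ids: 0s then 1s, as built above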
| 349 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = 384
if "tiny" in model_name:
__UpperCamelCase = [3, 3, 9, 3]
__UpperCamelCase = [96, 192, 384, 768]
if "small" in model_name:
__UpperCamelCase = [3, 3, 27, 3]
__UpperCamelCase = [96, 192, 384, 768]
if "base" in model_name:
__UpperCamelCase = [3, 3, 27, 3]
__UpperCamelCase = [128, 256, 512, 1_024]
__UpperCamelCase = 512
if "large" in model_name:
__UpperCamelCase = [3, 3, 27, 3]
__UpperCamelCase = [192, 384, 768, 1_536]
__UpperCamelCase = 768
if "xlarge" in model_name:
__UpperCamelCase = [3, 3, 27, 3]
__UpperCamelCase = [256, 512, 1_024, 2_048]
__UpperCamelCase = 1_024
# set label information
__UpperCamelCase = 150
__UpperCamelCase = """huggingface/label-files"""
__UpperCamelCase = """ade20k-id2label.json"""
__UpperCamelCase = json.load(open(hf_hub_download(__A ,__A ,repo_type="""dataset""" ) ,"""r""" ) )
__UpperCamelCase = {int(__A ): v for k, v in idalabel.items()}
__UpperCamelCase = {v: k for k, v in idalabel.items()}
__UpperCamelCase = ConvNextConfig(
depths=__A ,hidden_sizes=__A ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__UpperCamelCase = UperNetConfig(
backbone_config=__A ,auxiliary_in_channels=__A ,num_labels=__A ,idalabel=__A ,labelaid=__A ,)
return config
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = dct.pop(__A )
__UpperCamelCase = val
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
__UpperCamelCase = model_name_to_url[model_name]
__UpperCamelCase = torch.hub.load_state_dict_from_url(__A ,map_location="""cpu""" )["""state_dict"""]
__UpperCamelCase = get_upernet_config(__A )
__UpperCamelCase = UperNetForSemanticSegmentation(__A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCamelCase = state_dict.pop(__A )
if "bn" in key:
__UpperCamelCase = key.replace("""bn""" ,"""batch_norm""" )
__UpperCamelCase = val
# rename keys
__UpperCamelCase = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A ,__A ,__A )
model.load_state_dict(__A )
# verify on image
__UpperCamelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
__UpperCamelCase = Image.open(requests.get(__A ,stream=__A ).raw ).convert("""RGB""" )
__UpperCamelCase = SegformerImageProcessor()
__UpperCamelCase = processor(__A ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
__UpperCamelCase = model(__A )
if model_name == "upernet-convnext-tiny":
__UpperCamelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCamelCase = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCamelCase = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCamelCase = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCamelCase = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,__A ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__A )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__A )
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
a__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
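# The torch-only TrOCR classes are registered below only when torch is importable,
# so that importing the package does not require a torch install.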
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
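# Maps pretrained checkpoint identifiers to the URLs of their hosted config files.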
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30_000, embedding_size=128, hidden_size=4_096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16_384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
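# Demo: gradient-boosted regression (XGBoost) on scikit-learn's California housing data.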
def data_handling(data):
    """Split a scikit-learn dataset bunch into (features, target)."""
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    """Fit an XGBoost regressor and predict targets for the test features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    """Fetch the California housing dataset, train, and report error metrics."""
    data = fetch_california_housing()
    features, target = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(features, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 349 | 1 |
'''simple docstring'''
import requests
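# Tiny Giphy search client; get a personal API key at https://developers.giphy.com/.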
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs matching the query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 349 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 349 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
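# The fast tests below build a tiny UNet/VAE/CLIP stack so the full SAG pipeline runs in seconds on CPU.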
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
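# Classic "house robber" dynamic programming: track the best sum that includes the
# current element and the best sum that excludes it, in a single pass.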
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""")
            input_text = input_file.read()
            match = regexp.search(input_text)
            return match

    def _no_print_statements(self, file_path: str):
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
            return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 349 |
'''simple docstring'''
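# Subset-sum via a (len(arr) + 1) x (required_sum + 1) boolean DP table, where
# subset[i][j] is True when some subset of the first i values sums to j.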
def is_sum_subset(arr, required_sum):
    """Return True if some subset of ``arr`` sums exactly to ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
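# Converts a TensorFlow MobileBERT checkpoint into a PyTorch state dict on disk.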
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 349 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
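# Each extractor below recognises one archive format (usually by magic number) and
# knows how to unpack it; `Extractor` dispatches between them.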
class ExtractManager:
    def __init__(self, cache_dir=None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # filter out members that would be extracted outside output_path (path traversal guard)
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 349 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50_265, d_model=1_024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4_096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 349 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
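# Standalone sketch of the mask construction inside
# prepare_pegasus_inputs_dict above: non-pad positions get 1, pad positions
# get 0 (np.inta in the helper is this cell's obfuscated spelling of np.int8).
_pad_token_id = 0
_ids = np.array([[5, 7, 9, _pad_token_id, _pad_token_id]])
_mask = np.not_equal(_ids, _pad_token_id).astype(np.int8)
# _mask -> [[1, 1, 1, 0, 0]]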
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 42
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_):
@register_to_config
def __init__( self , lowercase = 3_2 , lowercase = 6_4 , lowercase = 2_0 , lowercase = 7_6_8 , lowercase=7_7 , lowercase=4 , lowercase = 0.0 , lowercase = "silu" , lowercase = None , lowercase = None , lowercase = "linear" , lowercase = "prd" , lowercase = None , lowercase = None , lowercase = None , ) -> Any:
super().__init__()
__UpperCamelCase = num_attention_heads
__UpperCamelCase = attention_head_dim
__UpperCamelCase = num_attention_heads * attention_head_dim
__UpperCamelCase = additional_embeddings
__UpperCamelCase = time_embed_dim or inner_dim
__UpperCamelCase = embedding_proj_dim or embedding_dim
__UpperCamelCase = clip_embed_dim or embedding_dim
__UpperCamelCase = Timesteps(lowercase , lowercase , 0 )
__UpperCamelCase = TimestepEmbedding(lowercase , lowercase , out_dim=lowercase , act_fn=lowercase )
__UpperCamelCase = nn.Linear(lowercase , lowercase )
if embedding_proj_norm_type is None:
__UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
__UpperCamelCase = nn.LayerNorm(lowercase )
else:
raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__UpperCamelCase = nn.Linear(lowercase , lowercase )
if encoder_hid_proj_type is None:
__UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
__UpperCamelCase = nn.Linear(lowercase , lowercase )
else:
raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowercase ) )
if added_emb_type == "prd":
__UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowercase ) )
elif added_emb_type is None:
__UpperCamelCase = None
else:
raise ValueError(
f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowercase , lowercase , lowercase , dropout=lowercase , activation_fn="""gelu""" , attention_bias=lowercase , )
for d in range(lowercase )
] )
if norm_in_type == "layer":
__UpperCamelCase = nn.LayerNorm(lowercase )
elif norm_in_type is None:
__UpperCamelCase = None
else:
raise ValueError(f"Unsupported norm_in_type: {norm_in_type}." )
__UpperCamelCase = nn.LayerNorm(lowercase )
__UpperCamelCase = nn.Linear(lowercase , lowercase )
__UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , lowercase , persistent=lowercase )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , lowercase ) )
__UpperCamelCase = nn.Parameter(torch.zeros(1 , lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ) -> Dict[str, AttentionProcessor]:
__UpperCamelCase = {}
def fn_recursive_add_processors(lowercase , lowercase , lowercase ):
if hasattr(lowercase , """set_processor""" ):
__UpperCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , lowercase , lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowercase , lowercase , lowercase )
return processors
def __lowerCamelCase ( self , lowercase ) -> Any:
__UpperCamelCase = len(self.attn_processors.keys() )
if isinstance(lowercase , lowercase ) and len(lowercase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(lowercase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowercase , lowercase , lowercase ):
if hasattr(lowercase , """set_processor""" ):
if not isinstance(lowercase , lowercase ):
module.set_processor(lowercase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , lowercase , lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = True , ) -> List[str]:
__UpperCamelCase = hidden_states.shape[0]
__UpperCamelCase = timestep
if not torch.is_tensor(lowercase ):
__UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowercase ) and len(timesteps.shape ) == 0:
__UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase = timesteps * torch.ones(lowercase , dtype=timesteps.dtype , device=timesteps.device )
__UpperCamelCase = self.time_proj(lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
__UpperCamelCase = self.time_embedding(lowercase )
if self.embedding_proj_norm is not None:
__UpperCamelCase = self.embedding_proj_norm(lowercase )
__UpperCamelCase = self.embedding_proj(lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCamelCase = self.encoder_hidden_states_proj(lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__UpperCamelCase = self.proj_in(lowercase )
__UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
__UpperCamelCase = []
__UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCamelCase = hidden_states[:, None, :]
__UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowercase , -1 , -1 )
additional_embeds.append(lowercase )
__UpperCamelCase = torch.cat(
lowercase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCamelCase = F.pad(
lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCamelCase = F.pad(lowercase , (0, self.additional_embeddings) , value=0.0 )
__UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCamelCase = self.norm_in(lowercase )
for block in self.transformer_blocks:
__UpperCamelCase = block(lowercase , attention_mask=lowercase )
__UpperCamelCase = self.norm_out(lowercase )
if self.prd_embedding is not None:
__UpperCamelCase = hidden_states[:, -1]
else:
__UpperCamelCase = hidden_states[:, additional_embeddings_len:]
__UpperCamelCase = self.proj_to_clip_embeddings(lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowercase )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
__UpperCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
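# Standalone sketch of the additive causal mask registered in __init__
# above: a strictly upper-triangular block of large negative values that,
# once added to the attention scores, masks out future positions.
_n = 4
_causal = torch.full((_n, _n), -10_000.0).triu_(1)
# row i may attend to columns <= i; columns > i receive -10000.0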
| 349 |
'''simple docstring'''
import pytest
a__ : List[str] = '__dummy_dataset1__'
a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = dataset_loading_script_name
__UpperCamelCase = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=__A )
__UpperCamelCase = script_dir / f"{script_name}.py"
with open(__A ,"""w""" ) as f:
f.write(__A )
return str(__A )
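# A minimal runnable illustration of the same fixture pattern (hypothetical
# names; exercised by pytest, which injects the built-in tmp_path fixture):
@pytest.fixture
def _script_file(tmp_path):
    path = tmp_path / "script.py"
    path.write_text("print('hi')")
    return str(path)

def test_script_file(_script_file):
    assert _script_file.endswith("script.py")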
| 349 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ : Optional[int] = logging.get_logger(__name__)
a__ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Dict = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
a__ : List[Any] = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase=False , **lowercase , ) -> List[str]:
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , add_prefix_space=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
__UpperCamelCase = getattr(lowercase , pre_tok_state.pop("""type""" ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**lowercase )
__UpperCamelCase = add_prefix_space
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def __lowerCamelCase ( self , lowercase ) -> List[int]:
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase , add_special_tokens=lowercase ) + [self.eos_token_id] )
if len(lowercase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
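# Toy illustration of the conversation-encoding loop above: every turn is
# followed by the EOS id, then the sequence is truncated from the left to
# the model's maximum length (plain-integer stand-ins, no tokenizer needed).
_eos_token_id = 0
_model_max_length = 6
_turns = [[11, 12], [21, 22, 23]]
_input_ids = []
for _turn in _turns:
    _input_ids.extend(_turn + [_eos_token_id])
if len(_input_ids) > _model_max_length:
    _input_ids = _input_ids[-_model_max_length:]
assert _input_ids == [12, 0, 21, 22, 23, 0]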
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a__ : Any = logging.get_logger(__name__)
a__ : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
a__ : List[str] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = {}
with open(__A ,"""r""" ) as file:
for line_number, line in enumerate(__A ):
__UpperCamelCase = line.strip()
if line:
__UpperCamelCase = line.split()
__UpperCamelCase = line_number
__UpperCamelCase = words[0]
__UpperCamelCase = value
return result
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = getattr(__A ,__A ).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase = value[0]
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = """.""".join([key, hf_param_name] )
else:
__UpperCamelCase = key
__UpperCamelCase = value if """lm_head""" in full_key else value[0]
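    # NOTE: in the de-obfuscated original this value is stored into the
    # dict passed to the function (hf_dict[full_key] = ...); the flattened
    # assignment above discards it.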
a__ : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase ( __A ,__A ,__A=None ,__A=None ):
'''simple docstring'''
__UpperCamelCase = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(__A )[0].split(""".""" )[-2]
__UpperCamelCase = mapped_key.replace("""*""" ,__A )
if "weight_g" in name:
__UpperCamelCase = """weight_g"""
elif "weight_v" in name:
__UpperCamelCase = """weight_v"""
elif "bias" in name:
__UpperCamelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = """weight"""
else:
__UpperCamelCase = None
if hf_dict is not None:
rename_dict(__A ,__A ,__A ,__A ,__A )
else:
set_recursively(__A ,__A ,__A ,__A ,__A )
return is_used
return is_used
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,)
__UpperCamelCase = True
else:
__UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A )
if not is_used:
unused_weights.append(__A )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = full_name.split("""conv_layers.""" )[-1]
__UpperCamelCase = name.split(""".""" )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__A )
@torch.no_grad()
def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(__A )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(__A )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(__A )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(__A ,"""vocab.json""" )
if not os.path.isdir(__A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) )
return
os.makedirs(__A ,exist_ok=__A )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(__A ,__A )
__UpperCamelCase = WavaVecaCTCTokenizer(
__A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,)
__UpperCamelCase = True if config.feat_extract_norm == """layer""" else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
__UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A )
processor.save_pretrained(__A )
__UpperCamelCase = WavaVecaForCTC(__A )
else:
__UpperCamelCase = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__UpperCamelCase = fairseq.tasks.setup_task(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A )
__UpperCamelCase = model[0].eval()
recursively_load_weights(__A ,__A ,not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
a__ : Optional[int] = parser.parse_args()
a__ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
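# Toy sketch of the "*" wildcard renaming performed by load_wav2vec2_layer
# above: the layer index parsed out of the fairseq key replaces the "*" in
# the mapped HF key (hypothetical key values).
_mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
_name = "encoder.layers.3.self_attn.k_proj.weight"
for _key, _mapped_key in _mapping.items():
    if _key in _name:
        _layer_index = _name.split(_key)[0].split(".")[-2]  # -> "3"
        _new_key = _mapped_key.replace("*", _layer_index)
assert _new_key == "encoder.layers.3.attention.k_proj"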
| 349 | 1 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a__ : Tuple = getLogger(__name__)
a__ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
def _lowercase ( __A ,__A ,__A ,__A = 8 ,__A = DEFAULT_DEVICE ,__A=False ,__A="summarization" ,__A=None ,**__A ,):
'''simple docstring'''
__UpperCamelCase = Path(__A ).open("""w""" ,encoding="""utf-8""" )
__UpperCamelCase = str(__A )
__UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(__A ).to(__A )
if fpaa:
__UpperCamelCase = model.half()
__UpperCamelCase = AutoTokenizer.from_pretrained(__A )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__UpperCamelCase = time.time()
# update config with task specific params
use_task_specific_params(__A ,__A )
if prefix is None:
__UpperCamelCase = prefix or getattr(model.config ,"""prefix""" ,"""""" ) or """"""
for examples_chunk in tqdm(list(chunks(__A ,__A ) ) ):
__UpperCamelCase = [prefix + text for text in examples_chunk]
__UpperCamelCase = tokenizer(__A ,return_tensors="""pt""" ,truncation=__A ,padding="""longest""" ).to(__A )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**__A ,)
__UpperCamelCase = tokenizer.batch_decode(__A ,skip_special_tokens=__A ,clean_up_tokenization_spaces=__A )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__UpperCamelCase = int(time.time() - start_time ) # seconds
__UpperCamelCase = len(__A )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def _lowercase ( ):
'''simple docstring'''
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def _lowercase ( __A=True ):
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" ,type=__A ,help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" ,type=__A ,help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" ,type=__A ,help="""where to save summaries""" )
parser.add_argument("""--reference_path""" ,type=__A ,required=__A ,help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" ,type=__A ,required=__A ,default="""metrics.json""" ,help="""where to save metrics""" )
parser.add_argument("""--device""" ,type=__A ,required=__A ,default=__A ,help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" ,type=__A ,required=__A ,default=__A ,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" ,type=__A ,default="""summarization""" ,help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" ,type=__A ,default=8 ,required=__A ,help="""batch size""" )
parser.add_argument(
"""--n_obs""" ,type=__A ,default=-1 ,required=__A ,help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" ,action="""store_true""" )
parser.add_argument("""--dump-args""" ,action="""store_true""" ,help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" ,nargs="""?""" ,type=__A ,const=datetime_now() ,help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__UpperCamelCase , __UpperCamelCase = parser.parse_known_args()
__UpperCamelCase = parse_numeric_n_bool_cl_kwargs(__A )
if parsed_args and verbose:
print(f"parsed the following generate kwargs: {parsed_args}" )
__UpperCamelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__UpperCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__A )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__UpperCamelCase = generate_summaries_or_translations(
__A ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**__A ,)
if args.reference_path is None:
return {}
# Compute scores
__UpperCamelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__A )]
__UpperCamelCase = score_fn(__A ,__A )
scores.update(__A )
if args.dump_args:
scores.update(__A )
if args.info:
__UpperCamelCase = args.info
if verbose:
print(__A )
if args.score_path is not None:
json.dump(__A ,open(args.score_path ,"""w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
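# Stand-in for the `chunks` helper imported from the local utils module
# above (assumption: it yields fixed-size slices of the example list):
def _chunks(seq, n):
    for i in range(0, len(seq), n):
        yield seq[i : i + n]

assert list(_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]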
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
def __init__( self , lowercase , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 9_9
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 3_7
__UpperCamelCase = """gelu"""
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )
def __lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def __lowerCamelCase ( self ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
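# Standalone sketch of the multiple-choice tiling used in the tester above:
# each example's ids are repeated once per answer choice (requires the TF
# import guarded at the top of this file).
_num_choices = 4
_ids = tf.constant([[1, 2, 3]])  # shape (batch=1, seq=3)
_tiled = tf.tile(tf.expand_dims(_ids, 1), (1, _num_choices, 1))
assert _tiled.shape == (1, 4, 3)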
| 349 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : Tuple = 'CompVis/stable-diffusion-v1-1'
a__ : Union[str, Any] = 'CompVis/stable-diffusion-v1-2'
a__ : Dict = 'CompVis/stable-diffusion-v1-3'
a__ : Optional[int] = 'CompVis/stable-diffusion-v1-4'
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Tuple:
        super().__init__()
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline(
vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , requires_safety_checker=lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCamelCase ( self ) -> Dict[str, Any]:
return {k: getattr(self , lowercase ) for k in self.config.keys() if not k.startswith("""_""" )}
def __lowerCamelCase ( self , lowercase = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
self.enable_attention_slicing(lowercase )
@torch.no_grad()
def __lowerCamelCase ( self , lowercase , lowercase = 5_1_2 , lowercase = 5_1_2 , lowercase = 5_0 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def __lowerCamelCase ( self , lowercase , lowercase = 5_1_2 , lowercase = 5_1_2 , lowercase = 5_0 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> Dict:
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def __lowerCamelCase ( self , lowercase , lowercase = 5_1_2 , lowercase = 5_1_2 , lowercase = 5_0 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> Union[str, Any]:
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def __lowerCamelCase ( self , lowercase , lowercase = 5_1_2 , lowercase = 5_1_2 , lowercase = 5_0 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> Optional[int]:
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def __lowerCamelCase ( self , lowercase , lowercase = 5_1_2 , lowercase = 5_1_2 , lowercase = 5_0 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> Union[str, Any]:
__UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(lowercase )
        # Check that height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
__UpperCamelCase = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.2
__UpperCamelCase = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.3
__UpperCamelCase = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.4
__UpperCamelCase = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
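# Standalone illustration of the `components` property above: every config
# entry whose key does not start with an underscore is exposed.
_config = {"vae": object(), "unet": object(), "_class_name": "Pipeline"}
_components = {k: v for k, v in _config.items() if not k.startswith("_")}
assert set(_components) == {"vae", "unet"}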
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each vector in value_array, find the nearest vector in dataset
    (by Euclidean distance) and return [nearest_vector, distance] pairs."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity: dot(a, b) / (|a| * |b|)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
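# Illustrative sanity checks (assumed, not part of the original file):
# euclidean(np.array([0, 0]), np.array([3, 4]))           -> 5.0
# cosine_similarity(np.array([1, 2]), np.array([6, 32]))  -> ~0.9615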
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best total value using items[index:] within max_weight."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
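# Worked example (added for illustration):
# knapsack([60, 100, 120], [10, 20, 30], 3, 50, 0) returns 220 -- the items
# weighing 20 and 30 fill the 50 capacity for a value of 100 + 120.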
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video URL via downloadgram.net and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
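# Caveat: this depends on the third-party downloadgram.net endpoint; the JSON
# shape assumed above (a list whose first item has an "urls" field) may change
# or the service may disappear without notice.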
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
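# Illustrative usage (assumed, not part of the original file):
#
# @for_all_test_methods(require_faiss, slow)
# class IndexTests(unittest.TestCase):
#     def test_search(self):
#         ...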
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(session, method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 for gw0, 1 for gw1, ...)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    """Derive a unique torch.distributed port per xdist worker so parallel runs don't clash."""
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
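# `_LazyModule` (registered at the bottom of this file) resolves the names in
# `_import_structure` on first attribute access, so importing this package stays
# cheap until a torch/TF symbol is actually used.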
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]", str_)]
def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    return to_simple_case(text)
def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")
def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
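# Illustrative behaviour (derived from the implementations above):
# to_pascal_case("one two 31235three4four")       -> "OneTwo31235three4four"
# to_camel_case("one two 31235three4four")        -> "oneTwo31235three4four"
# to_snake_case("one two 31235three4four", True)  -> "ONE_TWO_31235THREE4FOUR"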
if __name__ == "__main__":
__import__('doctest').testmod()
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
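# For the example above (text="ABAABA", pattern="AB") the matches start at
# indices 0 and 3, so the script prints: [0, 3]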
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 349 | 1 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Iterative digit sum of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum of |n|."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """One-liner digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time the three implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
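# Quick check (illustrative): all three implementations agree, e.g.
# sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15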
| 349 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """LoggerAdapter that, by default, only emits on the main process."""
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
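# Typical usage (illustrative):
#   logger = get_logger(__name__)
#   logger.info("logged once, on the main process")
#   logger.info("logged on every process", main_process_only=False)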
| 349 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(__A ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(teacher, save_path="student", e=None, d=None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs, ):
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}")
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}")
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
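# Illustrative CLI call via fire (example values; flag names follow the signature
# of create_student_by_copying_alternating_layers, assuming this file is saved as
# make_student.py):
#   python make_student.py facebook/bart-large-cnn --save_path student_dir --e 6 --d 3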
| 349 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
| 349 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 2_5_5 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None:
super().__init__(**lowercase )
__UpperCamelCase = size if size is not None else {"""shortest_edge""": 2_2_4}
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
__UpperCamelCase = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase = do_convert_rgb
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__UpperCamelCase = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
__UpperCamelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Any:
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(lowercase ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
__UpperCamelCase = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
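# A minimal usage sketch (hedged: the class and method names above are
# obfuscated placeholders; this mirrors a CLIP-style image processor's
# `preprocess`). With the defaults above, an image is converted to RGB,
# resized so its shortest edge is 224, center-cropped to 224x224, rescaled
# by 1/255, normalized with the OpenAI CLIP mean/std, and returned
# channel-first:
#   from PIL import Image
#   batch = processor.preprocess(Image.new("RGB", (640, 480)))  # `processor` is an assumed instance
#   batch["pixel_values"][0].shape  # -> (3, 224, 224)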
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
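# A worked example (hedged: keyword names below follow the upstream
# voltage/current/resistance order, which the obfuscated signature above
# collapses; `ohms_law` is an illustrative name for the function above).
# Exactly one argument is 0 and the function solves Ohm's law, V = I * R,
# for that quantity:
#   ohms_law(voltage=0, current=2, resistance=5)   # -> {"voltage": 10.0}
#   ohms_law(voltage=10, current=0, resistance=5)  # -> {"current": 2.0}
#   ohms_law(voltage=10, current=2, resistance=0)  # -> {"resistance": 5.0}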
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a__ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _lowercase ( __A ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class UpperCAmelCase__ ( nn.Module):
def __init__( self , lowercase , lowercase ) -> Dict:
super().__init__()
__UpperCamelCase = module
__UpperCamelCase = nn.Sequential(
nn.Linear(module.in_features , lowercase , bias=lowercase ) , nn.Linear(lowercase , module.out_features , bias=lowercase ) , )
__UpperCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowercase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __lowerCamelCase ( self , lowercase , *lowercase , **lowercase ) -> Optional[int]:
return self.module(lowercase , *lowercase , **lowercase ) + self.adapter(lowercase )
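# Hedged note: the class above is a minimal LoRA-style adapter -- a frozen base
# module plus a trainable two-layer bottleneck (in_features -> rank ->
# out_features) whose output is added to the module's output. Zero-initializing
# the second linear makes the adapted module start out identical to the base.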
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected
    # Therefore here we use only bloom-1b7 to test our module
__SCREAMING_SNAKE_CASE = '''bigscience/bloom-1b7'''
# Constant values
__SCREAMING_SNAKE_CASE = 2.109659552692574
__SCREAMING_SNAKE_CASE = '''Hello my name is'''
__SCREAMING_SNAKE_CASE = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''')
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''')
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''')
__SCREAMING_SNAKE_CASE = 1_0
def __lowerCamelCase ( self ) -> str:
# Models and tokenizer
__UpperCamelCase = AutoTokenizer.from_pretrained(self.model_name )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self ) -> Optional[int]:
super().setUp()
# Models and tokenizer
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase , device_map="""auto""" )
def __lowerCamelCase ( self ) -> Tuple:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_abit.config
self.assertTrue(hasattr(lowercase , """quantization_config""" ) )
__UpperCamelCase = config.to_dict()
__UpperCamelCase = config.to_diff_dict()
__UpperCamelCase = config.to_json_string()
def __lowerCamelCase ( self ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
__UpperCamelCase = self.model_fpaa.get_memory_footprint()
__UpperCamelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__UpperCamelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __lowerCamelCase ( self ) -> str:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowercase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__UpperCamelCase = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowercase ) , self.EXPECTED_OUTPUTS )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = BitsAndBytesConfig()
__UpperCamelCase = True
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowercase , device_map="""auto""" )
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__UpperCamelCase = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowercase ) , self.EXPECTED_OUTPUTS )
def __lowerCamelCase ( self ) -> Tuple:
with self.assertRaises(lowercase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = BitsAndBytesConfig()
with self.assertRaises(lowercase ):
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowercase , load_in_abit=lowercase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def __lowerCamelCase ( self ) -> Union[str, Any]:
with self.assertRaises(lowercase ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowercase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowercase ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowercase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowercase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__UpperCamelCase = self.model_fpaa.to(torch.floataa )
__UpperCamelCase = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
__UpperCamelCase = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
__UpperCamelCase = self.model_fpaa.half()
# Check this does not throw an error
__UpperCamelCase = self.model_fpaa.float()
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowercase , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase):
@classmethod
def __lowerCamelCase ( cls ) -> List[str]:
__UpperCamelCase = """t5-small"""
__UpperCamelCase = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
__UpperCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
__UpperCamelCase = """Translate in German: Hello, my dog is cute"""
def __lowerCamelCase ( self ) -> int:
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> int:
from transformers import TaForConditionalGeneration
__UpperCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__UpperCamelCase = None
# test with `t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowercase , device_map="""auto""" )
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__UpperCamelCase = model.generate(**lowercase )
# test with `flan-t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowercase , device_map="""auto""" )
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__UpperCamelCase = model.generate(**lowercase )
__UpperCamelCase = modules
def __lowerCamelCase ( self ) -> Optional[int]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowercase , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__UpperCamelCase = model.generate(**lowercase )
# test with `flan-t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowercase , device_map="""auto""" )
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__UpperCamelCase = model.generate(**lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self ) -> Tuple:
super().setUp()
# model_name
__UpperCamelCase = """bigscience/bloom-560m"""
__UpperCamelCase = """t5-small"""
# Different types of model
__UpperCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=lowercase , device_map="""auto""" )
# Sequence classification model
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowercase , device_map="""auto""" )
# CausalLM model
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase , device_map="""auto""" )
# Seq2seq model
__UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowercase , device_map="""auto""" )
def __lowerCamelCase ( self ) -> int:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self ) -> List[str]:
super().setUp()
def __lowerCamelCase ( self ) -> List[str]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__UpperCamelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self ) -> Dict:
super().setUp()
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowercase , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__UpperCamelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
__UpperCamelCase = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowercase ) , self.EXPECTED_OUTPUTS )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = """facebook/opt-350m"""
super().setUp()
def __lowerCamelCase ( self ) -> Dict:
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__UpperCamelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__UpperCamelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowercase ) ):
__UpperCamelCase = LoRALayer(module.q_proj , rank=1_6 )
__UpperCamelCase = LoRALayer(module.k_proj , rank=1_6 )
__UpperCamelCase = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
__UpperCamelCase = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__UpperCamelCase = model.forward(**lowercase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowercase , lowercase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowercase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''gpt2-xl'''
__SCREAMING_SNAKE_CASE = 3.3191854854152187
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a__ : Union[str, Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''albert'''
def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
__UpperCamelCase = vocab_size
__UpperCamelCase = embedding_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_hidden_groups
__UpperCamelCase = num_attention_heads
__UpperCamelCase = inner_group_num
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = position_embedding_type
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 | 1 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase , lowercase = True , lowercase = False ) -> Any:
__UpperCamelCase = scheduler
__UpperCamelCase = optimizers if isinstance(lowercase , (list, tuple) ) else [optimizers]
__UpperCamelCase = split_batches
__UpperCamelCase = step_with_optimizer
__UpperCamelCase = GradientState()
def __lowerCamelCase ( self , *lowercase , **lowercase ) -> str:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowercase , **lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowercase , **lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__UpperCamelCase = AcceleratorState().num_processes
for _ in range(lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , """total_steps""" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowercase , **lowercase )
else:
self.scheduler.step(*lowercase , **lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def __lowerCamelCase ( self ) -> str:
return self.scheduler.state_dict()
def __lowerCamelCase ( self , lowercase ) -> List[Any]:
self.scheduler.load_state_dict(lowercase )
def __lowerCamelCase ( self ) -> Tuple:
return self.scheduler.get_lr()
def __lowerCamelCase ( self , *lowercase , **lowercase ) -> Union[str, Any]:
return self.scheduler.print_lr(*lowercase , **lowercase )
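# A minimal usage sketch (hedged: upstream this wrapper is accelerate's
# `AcceleratedScheduler`; the instance names below are illustrative). It steps
# the wrapped scheduler only when its optimizer actually stepped, repeating the
# step `num_processes` times unless `split_batches` is set:
#   import torch
#   opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#   wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=False)
#   wrapped.step()  # delegates straight to sched.step()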
| 349 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowercase ( __A ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 )
xgb.fit(__A ,__A )
# Predict target for test data
__UpperCamelCase = xgb.predict(__A )
__UpperCamelCase = predictions.reshape(len(__A ) ,1 )
return predictions
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = fetch_california_housing()
__UpperCamelCase , __UpperCamelCase = data_handling(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split(
__A ,__A ,test_size=0.25 ,random_state=1 )
__UpperCamelCase = xgboost(__A ,__A ,__A )
    # Print the error metrics
print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" )
print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 349 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''cvt'''
def __init__( self , lowercase=3 , lowercase=[7, 3, 3] , lowercase=[4, 2, 2] , lowercase=[2, 1, 1] , lowercase=[6_4, 1_9_2, 3_8_4] , lowercase=[1, 3, 6] , lowercase=[1, 2, 1_0] , lowercase=[4.0, 4.0, 4.0] , lowercase=[0.0, 0.0, 0.0] , lowercase=[0.0, 0.0, 0.0] , lowercase=[0.0, 0.0, 0.1] , lowercase=[True, True, True] , lowercase=[False, False, True] , lowercase=["dw_bn", "dw_bn", "dw_bn"] , lowercase=[3, 3, 3] , lowercase=[1, 1, 1] , lowercase=[2, 2, 2] , lowercase=[1, 1, 1] , lowercase=[1, 1, 1] , lowercase=0.02 , lowercase=1E-12 , **lowercase , ) -> str:
super().__init__(**lowercase )
__UpperCamelCase = num_channels
__UpperCamelCase = patch_sizes
__UpperCamelCase = patch_stride
__UpperCamelCase = patch_padding
__UpperCamelCase = embed_dim
__UpperCamelCase = num_heads
__UpperCamelCase = depth
__UpperCamelCase = mlp_ratio
__UpperCamelCase = attention_drop_rate
__UpperCamelCase = drop_rate
__UpperCamelCase = drop_path_rate
__UpperCamelCase = qkv_bias
__UpperCamelCase = cls_token
__UpperCamelCase = qkv_projection_method
__UpperCamelCase = kernel_qkv
__UpperCamelCase = padding_kv
__UpperCamelCase = stride_kv
__UpperCamelCase = padding_q
__UpperCamelCase = stride_q
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder()
__UpperCamelCase = inputs_dict["""input_ids"""]
__UpperCamelCase = input_ids[:1, :]
__UpperCamelCase = inputs_dict["""attention_mask"""][:1, :]
__UpperCamelCase = inputs_dict["""head_mask"""]
__UpperCamelCase = 1
# first forward pass
__UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
__UpperCamelCase , __UpperCamelCase = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
__UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase = model(lowercase , attention_mask=lowercase )[0]
__UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
__UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
__SCREAMING_SNAKE_CASE = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__SCREAMING_SNAKE_CASE = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__SCREAMING_SNAKE_CASE = '''google/pegasus-xsum'''
@cached_property
def __lowerCamelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
__UpperCamelCase = self.translate_src_text(**lowercase )
assert self.expected_text == generated_words
def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]:
__UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" )
__UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
__UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )
return generated_words
@slow
def __lowerCamelCase ( self ) -> Dict:
self._assert_generated_batch_equal_expected()
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir('fixtures/test_sentencepiece.model')
a__ : List[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
a__ : Optional[Any] = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = CamembertTokenizer
__SCREAMING_SNAKE_CASE = CamembertTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def __lowerCamelCase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase = CamembertTokenizer(lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = """<pad>"""
__UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowercase ) , 1_0_0_4 )
def __lowerCamelCase ( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = CamembertTokenizer(lowercase )
tokenizer.save_pretrained(self.tmpdirname )
__UpperCamelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__UpperCamelCase = """I was born in 92000, and this is falsé."""
__UpperCamelCase = tokenizer.encode(lowercase )
__UpperCamelCase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__UpperCamelCase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
        # <unk> tokens are not the same for `rust` as for `slow`,
        # because spm gives back the raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__UpperCamelCase = tokenizer.convert_ids_to_tokens(lowercase )
__UpperCamelCase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = self.get_rust_tokenizer()
__UpperCamelCase = """I was born in 92000, and this is falsé."""
__UpperCamelCase = tokenizer.tokenize(lowercase )
__UpperCamelCase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__UpperCamelCase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__UpperCamelCase = self.get_rust_tokenizer()
__UpperCamelCase = tokenizer.encode(lowercase )
__UpperCamelCase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
@slow
def __lowerCamelCase ( self ) -> Dict:
# fmt: off
__UpperCamelCase = {"""input_ids""": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # camembert is a French model, so we also use French texts.
__UpperCamelCase = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=lowercase , )
| 349 |
'''simple docstring'''
import string
def _lowercase ( __A ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(__A )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = input("""Encrypted message: """ )
__UpperCamelCase = message.upper()
decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 349 | 1 |
'''simple docstring'''
from math import factorial, pi
def _lowercase ( __A ,__A = 30 ):
'''simple docstring'''
if not isinstance(__A ,(int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(__A ,__A ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
__UpperCamelCase = float(__A )
__UpperCamelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) )
def _lowercase ( __A ,__A = 30 ):
'''simple docstring'''
if not isinstance(__A ,(int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(__A ,__A ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
__UpperCamelCase = float(__A )
__UpperCamelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) )
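# The two functions above sum truncated Maclaurin series (a hedged comment
# restatement of the code, not new behavior):
#   sin(theta) = sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r+1) / (2r+1)!
#   cos(theta) = sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r)   / (2r)!
# after reducing theta modulo 2*pi. Quick check: maclaurin_sin(pi / 2) ~ 1.0
# and maclaurin_cos(pi) ~ -1.0.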
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Dict = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''gptj'''
__SCREAMING_SNAKE_CASE = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple:
__UpperCamelCase = vocab_size
__UpperCamelCase = n_positions
__UpperCamelCase = n_embd
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
__UpperCamelCase = n_inner
__UpperCamelCase = rotary_dim
__UpperCamelCase = activation_function
__UpperCamelCase = resid_pdrop
__UpperCamelCase = embd_pdrop
__UpperCamelCase = attn_pdrop
__UpperCamelCase = layer_norm_epsilon
__UpperCamelCase = initializer_range
__UpperCamelCase = use_cache
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
super().__init__(
bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]:
super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase )
if not getattr(self._config , """pad_token_id""" , lowercase ):
# TODO: how to do that better?
__UpperCamelCase = 0
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
__UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction="""inputs""" )
__UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __lowerCamelCase ( self ) -> int:
return self._config.n_layer
@property
def __lowerCamelCase ( self ) -> int:
return self._config.n_head
def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
__UpperCamelCase = super(lowercase , self ).generate_dummy_inputs(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        # We need to order the inputs in the way they appear in the forward()
__UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__UpperCamelCase = seqlen + 2
__UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCamelCase = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers )
]
__UpperCamelCase = common_inputs["""attention_mask"""]
if self.use_past:
__UpperCamelCase = ordered_inputs["""attention_mask"""].dtype
__UpperCamelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
return ordered_inputs
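    # Shape note (a hedged restatement of the construction above): each dummy
    # past key/value tensor has shape
    #   (batch, num_attention_heads, seqlen + 2, hidden_size // num_attention_heads)
    # and the attention mask is extended with seqlen + 2 ones to cover it.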
@property
def __lowerCamelCase ( self ) -> int:
return 1_3
| 349 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase__ ( unittest.TestCase):
__SCREAMING_SNAKE_CASE = MODEL_FOR_MASKED_LM_MAPPING
__SCREAMING_SNAKE_CASE = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowerCamelCase ( self ) -> int:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
__UpperCamelCase = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 3_8_0_1_5, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 2_5_5_0_6, """token_str""": """ accuser"""},
] , )
__UpperCamelCase = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 3_8_0_1_5,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 2_5_5_0_6,
"""token_str""": """ accuser""",
},
] , )
__UpperCamelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_9_4_1, """token_str""": """ Te"""},
] , )
@require_torch
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
__UpperCamelCase = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 3_5_6_7_6, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""},
] , )
__UpperCamelCase = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""},
] , )
__UpperCamelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_9_4_1, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
] , )
__UpperCamelCase = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
__UpperCamelCase = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(lowercase , lowercase )
@slow
@require_torch
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(lowercase )
@slow
@require_tf
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(lowercase )
def __lowerCamelCase ( self , lowercase ) -> Dict:
__UpperCamelCase = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 6_1_0, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_5_7_3, """token_str""": """ Chris"""},
] , )
__UpperCamelCase = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowercase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_2_0_1,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2_7_9_0,
"""token_str""": """ Lyon""",
},
] , )
__UpperCamelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_9_4_1, """token_str""": """ Te"""},
] , )
@require_torch
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
__UpperCamelCase = None
__UpperCamelCase = None
self.run_pipeline_test(lowercase , [] )
@require_tf
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
__UpperCamelCase = None
__UpperCamelCase = None
self.run_pipeline_test(lowercase , [] )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
__UpperCamelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def __lowerCamelCase ( self , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = fill_masker.tokenizer
__UpperCamelCase = fill_masker.model
__UpperCamelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
lowercase , [
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
] , )
__UpperCamelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
lowercase , [
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
] , )
__UpperCamelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
lowercase , [
[
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
],
[
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
],
] , )
with self.assertRaises(lowercase ):
fill_masker([None] )
        # Inputs without a mask token are not supported
with self.assertRaises(lowercase ):
fill_masker("""This is""" )
self.run_test_top_k(lowercase , lowercase )
self.run_test_targets(lowercase , lowercase )
self.run_test_top_k_targets(lowercase , lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase , lowercase )
self.fill_mask_with_multiple_masks(lowercase , lowercase )
def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = tokenizer.get_vocab()
__UpperCamelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase , targets=lowercase )
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
lowercase , [
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
] , )
__UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , lowercase )
__UpperCamelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(lowercase ) )
# Call argument
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowercase )
self.assertEqual(
lowercase , [
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
] , )
__UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , lowercase )
__UpperCamelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(lowercase ) )
# Score equivalence
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowercase )
__UpperCamelCase = [top_mask["""token_str"""] for top_mask in outputs]
__UpperCamelCase = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase ) == set(lowercase ):
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowercase )
__UpperCamelCase = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
# Raises with invalid
with self.assertRaises(lowercase ):
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase ):
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""""""] )
with self.assertRaises(lowercase ):
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="""""" )
def __lowerCamelCase ( self , lowercase , lowercase ) -> Any:
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase , top_k=2 )
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
lowercase , [
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
] , )
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
lowercase , [
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
] , )
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> Any:
__UpperCamelCase = tokenizer.get_vocab()
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
# top_k=2, ntargets=3
__UpperCamelCase = sorted(vocab.keys() )[:3]
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=lowercase )
        # If we use the most probable targets and filter differently, we should still
        # have the same results
        __UpperCamelCase = [el["""token_str"""] for el in sorted(lowercase , key=lambda x : x["score"] , reverse=lowercase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase ).issubset(lowercase ):
__UpperCamelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
__UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
__UpperCamelCase = sorted(vocab.keys() )[:3]
__UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__UpperCamelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=lowercase , top_k=1_0 )
        # The target list contains duplicates, so we can't return more
        # candidates than the number of unique targets
self.assertEqual(len(lowercase ) , 3 )
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
__UpperCamelCase = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
__UpperCamelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
lowercase , [
[
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
],
[
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
],
[
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
{"""sequence""": ANY(lowercase ), """score""": ANY(lowercase ), """token""": ANY(lowercase ), """token_str""": ANY(lowercase )},
],
] , )
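# Hedged usage sketch (added example, not part of the original test file). The object under
# test is the standard `transformers` fill-mask pipeline, e.g.:
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")  # -> list of {sequence, score, token, token_str} dicts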
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
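# Hedged note (added example): with _LazyModule, heavy submodules are only imported on first
# attribute access, e.g. `from transformers import LayoutLMv3Config` resolves the config
# module lazily at that point.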
| 349 | 1 |
'''simple docstring'''
from torch import nn
def get_activation ( act_fn ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"Unsupported activation function: {act_fn}" )
| 349 |
'''simple docstring'''
def is_sum_subset ( arr ,required_sum ):
    '''simple docstring'''
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # a non-zero sum cannot be formed from an empty set, hence False
    for i in range(1 ,required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 ,arr_len + 1 ):
        for j in range(1 ,required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
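    # Hedged demo (added example, not in the original file): 4 + 5 == 9, so a subset
    # of [3, 34, 4, 12, 5, 2] reaches the target, while 30 is unreachable.
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))  # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False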
| 349 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = 'ibert'
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        quant_mode=False,
        force_dequant='none',
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
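# Hedged usage sketch (added example, not in the original file):
#   config = IBertConfig(quant_mode=True)   # integer-only quantization mode
#   config.hidden_size                      # -> 768 by default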
| 349 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        with open(path, 'rb') as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x1F\x8B']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, 'rb') as gzip_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b'PK\x03\x04',
        b'PK\x05\x06',  # empty archive
        b'PK\x07\x08',  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, 'rb') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, 'r') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00']  # RAR_ID, RAR5_ID
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile')
        import rarfile
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x28\xb5\x2F\xFD']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard')
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x42\x5A\x68']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr')
        import py7zr
        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, 'r') as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x04\x22\x4D\x18']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4')
        import lz4.frame
        with lz4.frame.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Keep the zip extractor last: other formats (e.g. tar or gzip) can be wrongly detected as zip
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('.lock'))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
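# Hedged usage sketch (added example; "archive.zip" and "out_dir" are placeholder paths):
#   fmt = Extractor.infer_extractor_format("archive.zip")   # -> "zip", inferred from the magic number
#   if fmt:
#       Extractor.extract("archive.zip", "out_dir", extractor_format=fmt)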
| 349 | 1 |
'''simple docstring'''
def nand_gate ( input_a ,input_b ):
    '''simple docstring'''
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate ( ):
    '''simple docstring'''
    assert nand_gate(0 ,0 ) == 1
    assert nand_gate(0 ,1 ) == 1
    assert nand_gate(1 ,0 ) == 1
    assert nand_gate(1 ,1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 349 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os
    # The slow tests often fail with OOM errors on GPU. This makes JAX allocate exactly what is
    # needed on demand and deallocate memory that is no longer needed, at the cost of speed, as
    # stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
    import jax
    import jax.numpy as jnp
    import numpy as np
    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict ( config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids ,config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape ,dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.int8 ),
            ] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
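# Hedged note (added example): this helper builds what the tester feeds the model, e.g.
#   inputs = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
# where both attention masks default to 1 for every non-pad token.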
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
return model
@property
def __lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
__UpperCamelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__UpperCamelCase = DDPMScheduler()
__UpperCamelCase = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 , return_dict=lowercase )
__UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = self.dummy_vqvae_and_unet
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=1_0 )
__UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = self.dummy_unet_condition
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = torch.rand((1, 1, 1_0) )
__UpperCamelCase = pipe(generator=lowercase , encoding=lowercase )
__UpperCamelCase = output.images[0]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = torch_device
__UpperCamelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 349 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name ,dataset_loading_script_code ,tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f"{script_name}.py"
    with open(script_path ,"""w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
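# Hedged usage note (added example): pytest injects these fixtures by parameter name, e.g.
#   def test_dummy_dataset(dataset_loading_script_dir):
#       ds = datasets.load_dataset(dataset_loading_script_dir)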
| 349 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger than or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                """ process.""" )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
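# Hedged usage sketch (added example; the checkpoint id is a placeholder):
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   audio = pipe(audio_length_in_s=4.5, num_inference_steps=100).audios[0]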
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict ( filename ):
    '''simple docstring'''
    result = {}
    with open(filename ,"""r""" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
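# Hedged format note (added example): each non-empty line contributes one entry, keyed by its
# line number and valued by its first whitespace-separated token, e.g. a file whose first line
# is "down" yields {0: "down"} when used to build an id2label mapping.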
def set_recursively ( hf_pointer ,key ,value ,full_name ,weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer ,attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer ,weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split(""".""" ):
            shape_pointer = getattr(shape_pointer ,attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split(""".""" ):
            hf_pointer = getattr(hf_pointer ,attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def rename_dict ( key ,value ,full_name ,weight_type ,hf_dict ):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""
    if weight_type is not None and weight_type != "param":
        full_key = """.""".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = """.""".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if """lm_head""" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer ( name ,value ,hf_model=None ,hf_dict=None ):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split(""".""" )[-2]
                mapped_key = mapped_key.replace("""*""" ,layer_index )
            if "weight_g" in name:
                weight_type = """weight_g"""
            elif "weight_v" in name:
                weight_type = """weight_v"""
            elif "bias" in name:
                weight_type = """bias"""
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = """weight"""
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key ,value ,name ,weight_type ,hf_dict )
            else:
                set_recursively(hf_model ,mapped_key ,value ,name ,weight_type )
            return is_used
    return is_used
def recursively_load_weights ( fairseq_model ,hf_model ,is_headless ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name ,value ,feature_extractor ,unused_weights ,hf_model.config.feat_extract_norm == """group""" ,)
            is_used = True
        else:
            is_used = load_wavaveca_layer(name ,value ,hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = full_name.split("""conv_layers.""" )[-1]
__UpperCamelCase = name.split(""".""" )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
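    # items[0] is the conv layer index; items[1] selects the parameter group:
    # 0 for the convolution itself, 2 for its (group/layer) norm, per the
    # branches below.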
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__A )
@torch.no_grad()
def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(__A )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(__A )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(__A )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(__A ,"""vocab.json""" )
if not os.path.isdir(__A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) )
return
os.makedirs(__A ,exist_ok=__A )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(__A ,__A )
__UpperCamelCase = WavaVecaCTCTokenizer(
__A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,)
__UpperCamelCase = True if config.feat_extract_norm == """layer""" else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
__UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A )
processor.save_pretrained(__A )
__UpperCamelCase = WavaVecaForCTC(__A )
else:
__UpperCamelCase = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__UpperCamelCase = fairseq.tasks.setup_task(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A )
__UpperCamelCase = model[0].eval()
recursively_load_weights(__A ,__A ,not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
a__ : Optional[int] = parser.parse_args()
a__ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
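# Hypothetical invocation sketch (script name and paths illustrative; the
# flags are the ones registered by the parser above):
# python convert_wav2vec2.py --checkpoint_path /path/to/fairseq_model.pt \
#     --pytorch_dump_folder_path ./wav2vec2-hf --dict_path /path/to/dict.ltr.txt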
| 349 | 1 |
'''simple docstring'''
import string
def _lowercase ( __A ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(__A )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = input("""Encrypted message: """ )
__UpperCamelCase = message.upper()
decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
def __init__( self , lowercase , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 9_9
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 3_7
__UpperCamelCase = """gelu"""
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
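        # both the ids and the mask are tiled from (batch, seq_len) to
        # (batch, num_choices, seq_len) so every choice shares the same context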
__UpperCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )
def __lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def __lowerCamelCase ( self ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : str = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _lowercase ( __A ,__A ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) )
def _lowercase ( __A ,__A ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__UpperCamelCase = (
"""Wrong input data's dimensions... """
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(__A )
try:
if dataset.shape[1] != value_array.shape[1]:
__UpperCamelCase = (
"""Wrong input data's shape... """
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(__A )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
__UpperCamelCase = (
"""Input data have different datatype... """
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(__A )
__UpperCamelCase = []
for value in value_array:
__UpperCamelCase = euclidean(__A ,dataset[0] )
__UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
__UpperCamelCase = euclidean(__A ,__A )
if dist > temp_dist:
__UpperCamelCase = temp_dist
__UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _lowercase ( __A ,__A ):
'''simple docstring'''
return np.dot(__A ,__A ) / (norm(__A ) * norm(__A ))
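# Worked examples: the Euclidean distance between (0, 0) and (3, 4) is 5.0,
# and the cosine similarity of two parallel vectors is 1.0 (orthogonal: 0.0).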
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
a__ : Tuple = list[list[int]]
# assigning initial values to the grid
a__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _lowercase ( __A ,__A ,__A ,__A ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
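# Example: placing 5 at (row 0, column 1) of ``initial_grid`` is rejected,
# because column 1 already contains a 5 (at row 5).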
def _lowercase ( __A ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _lowercase ( __A ):
'''simple docstring'''
if location := find_empty_location(__A ):
__UpperCamelCase , __UpperCamelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(__A ,__A ,__A ,__A ):
__UpperCamelCase = digit
if sudoku(__A ) is not None:
return grid
__UpperCamelCase = 0
return None
def _lowercase ( __A ):
'''simple docstring'''
for row in grid:
for cell in row:
print(__A ,end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 2_0)
print_solution(example_grid)
print('\nExample grid solution:')
a__ : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
__UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(__A ).content
if __name__ == "__main__":
a__ : int = input('Enter Video/IGTV url: ').strip()
a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a__ : str = logging.getLogger(__name__)
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''summarization'''
__SCREAMING_SNAKE_CASE = ['''loss''']
__SCREAMING_SNAKE_CASE = ROUGE_KEYS
__SCREAMING_SNAKE_CASE = '''rouge2'''
def __init__( self , lowercase , **lowercase ) -> Optional[int]:
if hparams.sortish_sampler and hparams.gpus > 1:
__UpperCamelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
__UpperCamelCase = Path(self.output_dir ) / """metrics.json"""
__UpperCamelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
__UpperCamelCase = 0
__UpperCamelCase = defaultdict(lowercase )
__UpperCamelCase = self.config.model_type
__UpperCamelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__UpperCamelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__UpperCamelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__UpperCamelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__UpperCamelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__UpperCamelCase = get_git_info()["""repo_sha"""]
__UpperCamelCase = hparams.num_workers
__UpperCamelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
__UpperCamelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__UpperCamelCase = self.decoder_start_token_id
__UpperCamelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__UpperCamelCase = False
__UpperCamelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__UpperCamelCase = self.hparams.eval_max_gen_length
else:
__UpperCamelCase = self.model.config.max_length
__UpperCamelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __lowerCamelCase ( self , lowercase ) -> Dict[str, List[str]]:
__UpperCamelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
__UpperCamelCase = True
return readable_batch
def __lowerCamelCase ( self , lowercase , **lowercase ) -> str:
return self.model(lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase ) -> int:
__UpperCamelCase = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def __lowerCamelCase ( self , lowercase ) -> Tuple:
__UpperCamelCase = self.tokenizer.pad_token_id
__UpperCamelCase , __UpperCamelCase = batch["""input_ids"""], batch["""attention_mask"""]
__UpperCamelCase = batch["""labels"""]
if isinstance(self.model , lowercase ):
__UpperCamelCase = self.model._shift_right(lowercase )
else:
__UpperCamelCase = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__UpperCamelCase = decoder_input_ids
self.save_readable_batch(lowercase )
__UpperCamelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
__UpperCamelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, except for ignoring pad_token_id
__UpperCamelCase = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
__UpperCamelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__UpperCamelCase = nn.functional.log_softmax(lowercase , dim=-1 )
__UpperCamelCase , __UpperCamelCase = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def __lowerCamelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase = self._step(lowercase )
__UpperCamelCase = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
__UpperCamelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
__UpperCamelCase = batch["""input_ids"""].shape[0]
__UpperCamelCase = batch["""input_ids"""].eq(self.pad ).sum()
__UpperCamelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def __lowerCamelCase ( self , lowercase , lowercase="val" ) -> Dict:
self.step_count += 1
__UpperCamelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__UpperCamelCase = losses["""loss"""]
__UpperCamelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
__UpperCamelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__UpperCamelCase = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
__UpperCamelCase = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
__UpperCamelCase = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
__UpperCamelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
return calculate_rouge(lowercase , lowercase )
def __lowerCamelCase ( self , lowercase ) -> dict:
__UpperCamelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__UpperCamelCase = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__UpperCamelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
__UpperCamelCase = self.ids_to_clean_text(lowercase )
__UpperCamelCase = self.ids_to_clean_text(batch["""labels"""] )
__UpperCamelCase = self._step(lowercase )
__UpperCamelCase = dict(zip(self.loss_names , lowercase ) )
__UpperCamelCase = self.calc_generative_metrics(lowercase , lowercase )
__UpperCamelCase = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
return self._generative_step(lowercase )
def __lowerCamelCase ( self , lowercase ) -> Union[str, Any]:
return self.validation_epoch_end(lowercase , prefix="""test""" )
def __lowerCamelCase ( self , lowercase ) -> SeqaSeqDataset:
__UpperCamelCase = self.n_obs[type_path]
__UpperCamelCase = self.target_lens[type_path]
__UpperCamelCase = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
__UpperCamelCase = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__UpperCamelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__UpperCamelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def __lowerCamelCase ( self ) -> DataLoader:
__UpperCamelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def __lowerCamelCase ( self ) -> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def __lowerCamelCase ( self ) -> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> Dict:
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase )
parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowercase , default=5_0_0 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
"""--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''translation'''
__SCREAMING_SNAKE_CASE = ['''loss''']
__SCREAMING_SNAKE_CASE = ['''bleu''']
__SCREAMING_SNAKE_CASE = '''bleu'''
def __init__( self , lowercase , **lowercase ) -> List[str]:
super().__init__(lowercase , **lowercase )
__UpperCamelCase = hparams.src_lang
__UpperCamelCase = hparams.tgt_lang
def __lowerCamelCase ( self , lowercase , lowercase ) -> dict:
return calculate_bleu(lowercase , lowercase )
def _lowercase ( __A ,__A=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__A )
check_output_dir(__A ,expected_items=3 )
if model is None:
if "summarization" in args.task:
__UpperCamelCase = SummarizationModule(__A )
else:
__UpperCamelCase = TranslationModule(__A )
__UpperCamelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
__UpperCamelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__UpperCamelCase = os.environ.get("""WANDB_PROJECT""" ,__A )
__UpperCamelCase = WandbLogger(name=model.output_dir.name ,project=__A )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__UpperCamelCase = WandbLogger(name=model.output_dir.name ,project=f"hf_{dataset}" )
if args.early_stopping_patience >= 0:
__UpperCamelCase = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
__UpperCamelCase = False
__UpperCamelCase = args.val_metric == """loss"""
__UpperCamelCase = generic_train(
__A ,__A ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__A ) ,early_stopping_callback=__A ,logger=__A ,)
pickle_save(model.hparams ,model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
__UpperCamelCase = """"""
__UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir ,"""*.ckpt""" ) ,recursive=__A ) )
if checkpoints:
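        # the last checkpoint is assigned twice on purpose: once as the test
        # checkpoint and once as the trainer's resume checkpoint (the two
        # targets are distinct attributes in the de-obfuscated source)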
__UpperCamelCase = checkpoints[-1]
__UpperCamelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
a__ : Tuple = pl.Trainer.add_argparse_args(parser)
a__ : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a__ : str = parser.parse_args()
main(args)
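# Hypothetical launch sketch using only flags defined in this file (the generic
# trainer/model arguments are registered elsewhere via add_argparse_args and
# add_model_specific_args; all values illustrative):
# python finetune.py --task summarization --max_source_length 1024 \
#     --max_target_length 56 --label_smoothing 0.1 --n_train -1 --logger_name default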
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _lowercase ( __A ,__A=False ):
'''simple docstring'''
try:
__UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__UpperCamelCase = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False)
a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True)
a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a__ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a__ : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a__ : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a__ : Tuple = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def _lowercase ( __A ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires faiss""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires regex""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TF_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires JAX""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def _lowercase ( __A ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__UpperCamelCase = unittest.skip("""test is slow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__UpperCamelCase = unittest.skip("""test is local""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__UpperCamelCase = unittest.skip("""test is packaged""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__UpperCamelCase = unittest.skip("""test requires remote""" )(__A )
return test_case
def _lowercase ( *__A ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("""test""" ):
for decorator in decorators:
__UpperCamelCase = decorator(__A )
setattr(cls ,__A ,__A )
return cls
return decorate
class UpperCAmelCase__ ( UpperCAmelCase_):
pass
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
@contextmanager
def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ):
'''simple docstring'''
__UpperCamelCase = requests.Session().request
def timeout_request(__A ,__A ,__A ,**__A ):
# Change the url to an invalid url so that the connection hangs
__UpperCamelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
__UpperCamelCase = timeout
try:
return online_request(__A ,__A ,**__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__UpperCamelCase = url
__UpperCamelCase = e.args[0]
__UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),)
__UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(__A ,__A ,**__A ):
raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" ,__A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" ,__A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _lowercase ( *__A ,**__A ):
'''simple docstring'''
__UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowercase ( __A ,__A ):
'''simple docstring'''
return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist()
def _lowercase ( __A ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(__A ,*__A ,**__A ):
try:
return func(*__A ,**__A )
except HTTPError as err:
if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper ,__A )
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase , lowercase ) -> str:
__UpperCamelCase = returncode
__UpperCamelCase = stdout
__UpperCamelCase = stderr
async def _lowercase ( __A ,__A ):
'''simple docstring'''
while True:
__UpperCamelCase = await stream.readline()
if line:
callback(__A )
else:
break
async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ ,""" """.join(__A ) )
__UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__UpperCamelCase = []
__UpperCamelCase = []
def tee(__A ,__A ,__A ,__A="" ):
__UpperCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(__A )
if not quiet:
print(__A ,__A ,file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ),
_read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ),
] ,timeout=__A ,)
return _RunOutput(await p.wait() ,__A ,__A )
def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ):
'''simple docstring'''
__UpperCamelCase = asyncio.get_event_loop()
__UpperCamelCase = loop.run_until_complete(
_stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) )
__UpperCamelCase = """ """.join(__A )
if result.returncode > 0:
__UpperCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output." )
return result
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" )
__UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M )
return int(__A )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = 29_500
__UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta
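# e.g. pytest-xdist worker "gw3" yields port 29_500 + 3 = 29_503, giving each
# worker a distinct port.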
| 349 | 1 |
'''simple docstring'''
a__ : List[str] = tuple[float, float, float]
a__ : Any = tuple[float, float, float]
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = end_pointa[0] - end_pointa[0]
__UpperCamelCase = end_pointa[1] - end_pointa[1]
__UpperCamelCase = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = ab[1] * ac[2] - ab[2] * ac[1] # *i
__UpperCamelCase = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__UpperCamelCase = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _lowercase ( __A ,__A ):
'''simple docstring'''
return tuple(round(__A ,__A ) for x in vector ) == (0, 0, 0)
def _lowercase ( __A ,__A ,__A ,__A = 10 ):
'''simple docstring'''
__UpperCamelCase = create_vector(__A ,__A )
__UpperCamelCase = create_vector(__A ,__A )
return is_zero_vector(get_ad_vectors_cross(__A ,__A ) ,__A )
| 349 |
'''simple docstring'''
import re
def _lowercase ( __A ):
'''simple docstring'''
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )]
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
try:
__UpperCamelCase = split_input(__A )
if upper:
__UpperCamelCase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__UpperCamelCase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowercase ( __A ):
'''simple docstring'''
return to_simple_case(__A )
def _lowercase ( __A ):
'''simple docstring'''
try:
__UpperCamelCase = to_simple_case(__A )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""_""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""-""" )
if __name__ == "__main__":
__import__('doctest').testmod()
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
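# Illustrative sketch of what the helper above produces (hypothetical values,
# not part of the original tests), assuming config.pad_token_id == 0:
#
#   input_ids              = np.array([[5, 6, 0]])
#   decoder_input_ids      = np.array([[0, 7, 0]])
#   attention_mask         -> [[1, 1, 0]]  (non-pad positions)
#   decoder_attention_mask -> [[1, 1, 0]]  (first position forced to 1, the
#                                           rest compared against the pad id)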
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = 1
__UpperCamelCase = 3
__UpperCamelCase = (3_2, 3_2)
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase )
return image
@property
def __lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
return model
@property
def __lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowercase )
@property
def __lowerCamelCase ( self ) -> Tuple:
def extract(*lowercase , **lowercase ):
class UpperCAmelCase__ :
def __init__( self ) -> Tuple:
__UpperCamelCase = torch.ones([0] )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
self.pixel_values.to(lowercase )
return self
return Out()
return extract
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
 # assemble the pipeline (the DDIM scheduler configured above is used here, so no PRK steps apply)
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
 # with safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a__ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
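# Usage note (illustrative, not part of the original module): `_LazyModule`
# keeps importing the package itself cheap and defers the actual submodule
# imports, so a statement such as
#
#   from transformers.models.trocr import TrOCRProcessor
#
# only pulls in the torch-backed implementation module when the corresponding
# symbol is first resolved.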
| 349 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
| 349 | 1 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase__ ( UpperCAmelCase_):
@slow
@require_torch
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__UpperCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 1_2_8
__UpperCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__UpperCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__UpperCamelCase = train_dataset.select(range(3_2 ) )
__UpperCamelCase = val_dataset.select(range(1_6 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(lowercase ):
 # the Bert tokenizer automatically adds [CLS] <text> [SEP]
__UpperCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowercase , max_length=5_1_2 )
__UpperCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowercase , max_length=1_2_8 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
__UpperCamelCase = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__UpperCamelCase = outputs.attention_mask
assert all(len(lowercase ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowercase ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowercase ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase ) )] ) / len(lowercase )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowercase , batch_size=lowercase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowercase , batch_size=lowercase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=lowercase , per_device_train_batch_size=lowercase , per_device_eval_batch_size=lowercase , predict_with_generate=lowercase , evaluation_strategy="""steps""" , do_train=lowercase , do_eval=lowercase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=lowercase , args=lowercase , compute_metrics=_compute_metrics , train_dataset=lowercase , eval_dataset=lowercase , tokenizer=lowercase , )
# start training
trainer.train()
| 349 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCAmelCase__ ( logging.LoggerAdapter):
@staticmethod
def __lowerCamelCase ( lowercase ) -> Dict:
__UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
__UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase )
__UpperCamelCase = kwargs.pop("""in_order""" , lowercase )
if self.isEnabledFor(lowercase ):
if self._should_log(lowercase ):
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
elif in_order:
__UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
state.wait_for_everyone()
def _lowercase ( __A ,__A = None ):
'''simple docstring'''
if log_level is None:
__UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A )
__UpperCamelCase = logging.getLogger(__A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__A ,{} )
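# A minimal usage sketch (the helper above corresponds to
# `accelerate.logging.get_logger`; the messages are illustrative). The
# `main_process_only` and `in_order` kwargs are the ones popped in the
# adapter's log method above:
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()  # initializes PartialState first
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, on the main process only")
#   logger.info("logged on every process", main_process_only=False)
#   logger.info("logged per process, in rank order", main_process_only=False, in_order=True)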
| 349 | 1 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__ :
def __init__( self , lowercase ) -> Tuple:
__UpperCamelCase = data
__UpperCamelCase = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0]
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> List[str]:
return ((n << b) | (n >> (3_2 - b))) & 0Xffffffff
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = b"""\x80""" + b"""\x00""" * (6_3 - (len(self.data ) + 8) % 6_4)
__UpperCamelCase = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __lowerCamelCase ( self ) -> Dict:
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def __lowerCamelCase ( self , lowercase ) -> str:
__UpperCamelCase = list(struct.unpack(""">16L""" , lowercase ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
__UpperCamelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.padding()
__UpperCamelCase = self.split_blocks()
for block in self.blocks:
__UpperCamelCase = self.expand_block(lowercase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.h
for i in range(0 , 8_0 ):
if 0 <= i < 2_0:
__UpperCamelCase = (b & c) | ((~b) & d)
__UpperCamelCase = 0X5a827999
elif 2_0 <= i < 4_0:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = 0X6ed9eba1
elif 4_0 <= i < 6_0:
__UpperCamelCase = (b & c) | (b & d) | (c & d)
__UpperCamelCase = 0X8f1bbcdc
elif 6_0 <= i < 8_0:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = 0Xca62c1d6
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
self.rotate(lowercase , 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(lowercase , 3_0 ),
c,
d,
)
__UpperCamelCase = (
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = b"""Test String"""
assert SHAaHash(__A ).final_hash() == hashlib.shaa(__A ).hexdigest() # noqa: S324
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
__UpperCamelCase = f.read()
else:
__UpperCamelCase = bytes(__A ,"""utf-8""" )
print(SHAaHash(__A ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
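# Worked example of the padding rule in the padding method above
# (illustrative): for an empty message, len(data) == 0, so 63 - (0 + 8) % 64
# == 55 zero bytes follow the 0x80 byte; appending the 8-byte big-endian bit
# length gives 1 + 55 + 8 == 64 bytes, i.e. exactly one 64-byte block,
# satisfying SHA-1's requirement that the padded length be a multiple of 64.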
| 349 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ : Optional[Any] = logging.getLogger(__name__)
class UpperCAmelCase__ :
def __init__( self ) -> Any:
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str:
if not self.initialized:
__UpperCamelCase = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = True
def __lowerCamelCase ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]:
if index is not None and index.is_initialized() and len(lowercase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ) -> Dict:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) )
else:
__UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any:
return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int:
__UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase )
__UpperCamelCase = rag_tokenizer.question_encoder
__UpperCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase = """custom"""
__UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase )
else:
__UpperCamelCase = cls._build_index(lowercase )
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
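# A minimal wiring sketch (hypothetical names, not part of the original
# module; requires a running Ray cluster). The first class in this file plays
# the role of the per-actor retriever worker:
#
#   import ray
#   ray.init()
#   RemoteWorker = ray.remote(RagRetrieverWorker)  # the worker class above
#   workers = [RemoteWorker.remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", workers, index_name="exact"
#   )
#   retriever.init_retrieval()  # loads the index once inside every actor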
| 349 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = FileLock(str(tmpdir / """foo.lock""" ) )
__UpperCamelCase = FileLock(str(tmpdir / """foo.lock""" ) )
__UpperCamelCase = 0.01
with locka.acquire():
with pytest.raises(__A ):
__UpperCamelCase = time.time()
locka.acquire(__A )
assert time.time() - _start > timeout
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = """a""" * 1_000 + """.lock"""
__UpperCamelCase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(__A )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__UpperCamelCase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__A ):
locka.acquire(0 )
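# A minimal usage sketch of the FileLock API exercised above (illustrative,
# not part of the original test file):
#
#   from datasets.utils.filelock import FileLock, Timeout
#
#   lock = FileLock("resource.lock")
#   try:
#       with lock.acquire(timeout=1):  # raises Timeout if held elsewhere
#           ...                        # critical section
#   except Timeout:
#       print("another process holds the lock")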
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
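# A short usage sketch (the class above corresponds to
# `SqueezeBertTokenizerFast`; downloading from the Hub is assumed):
#
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("first segment", "second segment")
#   # enc["input_ids"] begins with [CLS] and separates the pair with [SEP];
#   # enc["token_type_ids"] is 0 over the first segment (including [CLS] and
#   # the first [SEP]) and 1 over the second, matching the two methods above.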
| 349 | 1 |
'''simple docstring'''
from statistics import mean, stdev
def _lowercase ( __A ,__A = 3 ):
'''simple docstring'''
__UpperCamelCase = min(__A )
__UpperCamelCase = max(__A )
# normalize data
return [round((x - x_min) / (x_max - x_min) ,__A ) for x in data]
def _lowercase ( __A ,__A = 3 ):
'''simple docstring'''
__UpperCamelCase = mean(__A )
__UpperCamelCase = stdev(__A )
# standardize data
return [round((x - mu) / (sigma) ,__A ) for x in data]
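# Worked example for the two helpers above (values checked by hand):
#
#   data = [10.0, 20.0, 30.0]
#   min-max normalization   -> [0.0, 0.5, 1.0]   # (x - 10) / (30 - 10)
#   z-score standardization -> [-1.0, 0.0, 1.0]  # mean 20, sample stdev 10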
| 349 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
a__ : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__SCREAMING_SNAKE_CASE = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase_ , metadata={'''help''': '''The input training data file (a text file).'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __lowerCamelCase ( self ) -> Union[str, Any]:
if self.train_file is not None:
__UpperCamelCase = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__UpperCamelCase = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , lowercase ) -> str:
__UpperCamelCase = """label""" if """label""" in features[0].keys() else """labels"""
__UpperCamelCase = [feature.pop(lowercase ) for feature in features]
__UpperCamelCase = len(lowercase )
__UpperCamelCase = len(features[0]["""input_ids"""] )
__UpperCamelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
__UpperCamelCase = list(chain(*lowercase ) )
__UpperCamelCase = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
__UpperCamelCase = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
__UpperCamelCase = torch.tensor(lowercase , dtype=torch.intaa )
return batch
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" ,__A ,__A )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__UpperCamelCase = {}
if data_args.train_file is not None:
__UpperCamelCase = data_args.train_file
if data_args.validation_file is not None:
__UpperCamelCase = data_args.validation_file
__UpperCamelCase = data_args.train_file.split(""".""" )[-1]
__UpperCamelCase = load_dataset(
__A ,data_files=__A ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
# Downloading and loading the swag dataset from the hub.
__UpperCamelCase = load_dataset(
"""swag""" ,"""regular""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=__A ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__UpperCamelCase = [f"ending{i}" for i in range(4 )]
__UpperCamelCase = """sent1"""
__UpperCamelCase = """sent2"""
if data_args.max_seq_length is None:
__UpperCamelCase = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__UpperCamelCase = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
__UpperCamelCase = min(data_args.max_seq_length ,tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__A ):
__UpperCamelCase = [[context] * 4 for context in examples[context_name]]
__UpperCamelCase = examples[question_header_name]
__UpperCamelCase = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(__A )
]
# Flatten out
__UpperCamelCase = list(chain(*__A ) )
__UpperCamelCase = list(chain(*__A ) )
# Tokenize
__UpperCamelCase = tokenizer(
__A ,__A ,truncation=__A ,max_length=__A ,padding="""max_length""" if data_args.pad_to_max_length else False ,)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 ,len(__A ) ,4 )] for k, v in tokenized_examples.items()}
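    # Shape sketch for the flatten/un-flatten above (illustrative): with 2
    # examples and 4 candidate endings each, the sentence lists are flattened
    # to length 8, tokenized in one pass, and then regrouped so every feature
    # becomes a list of 2 items of 4 sequences each -- one row per example,
    # one column per ending.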
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__UpperCamelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__UpperCamelCase = min(len(__A ) ,data_args.max_train_samples )
__UpperCamelCase = train_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__UpperCamelCase = train_dataset.map(
__A ,batched=__A ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__UpperCamelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__UpperCamelCase = min(len(__A ) ,data_args.max_eval_samples )
__UpperCamelCase = eval_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__UpperCamelCase = eval_dataset.map(
__A ,batched=__A ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
# Data collator
__UpperCamelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__A ,pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__A ):
__UpperCamelCase , __UpperCamelCase = eval_predictions
__UpperCamelCase = np.argmax(__A ,axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=__A ,args=__A ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,tokenizer=__A ,data_collator=__A ,compute_metrics=__A ,)
# Training
if training_args.do_train:
__UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
__UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCamelCase = last_checkpoint
__UpperCamelCase = trainer.train(resume_from_checkpoint=__A )
trainer.save_model() # Saves the tokenizer too for easy upload
__UpperCamelCase = train_result.metrics
__UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
__UpperCamelCase = min(__A ,len(__A ) )
trainer.log_metrics("""train""" ,__A )
trainer.save_metrics("""train""" ,__A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
__UpperCamelCase = min(__A ,len(__A ) )
trainer.log_metrics("""eval""" ,__A )
trainer.save_metrics("""eval""" ,__A )
__UpperCamelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def _lowercase ( __A ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
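# A minimal, self-contained sketch of the flatten/un-flatten trick used by
# `preprocess_function` above (toy data, no tokenizer needed): each question is
# repeated once per candidate ending, processed as a flat batch, then regrouped
# into chunks of 4 so every row again corresponds to one multiple-choice example.
from itertools import chain

contexts = [["Q1"] * 4, ["Q2"] * 4]                      # 2 questions x 4 endings
endings = [[f"Q{q} end{i}" for i in range(4)] for q in (1, 2)]

flat_contexts = list(chain(*contexts))                   # 8 strings
flat_endings = list(chain(*endings))                     # 8 strings
tokenized = {"input_ids": [[len(c), len(e)] for c, e in zip(flat_contexts, flat_endings)]}

# Regroup consecutive chunks of 4 rows back into one example each.
unflat = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized.items()}
assert len(unflat["input_ids"]) == 2 and len(unflat["input_ids"][0]) == 4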
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a__ : Union[str, Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''albert'''
def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
__UpperCamelCase = vocab_size
__UpperCamelCase = embedding_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_hidden_groups
__UpperCamelCase = num_attention_heads
__UpperCamelCase = inner_group_num
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = position_embedding_type
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
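# Hedged sketch of what the ONNX `inputs` property above evaluates to: for the
# multiple-choice task the second dynamic axis is the choice dimension,
# otherwise the axes are just (batch, sequence). Plain Python, no transformers import.
from collections import OrderedDict

def onnx_input_axes(task):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict((name, dynamic_axis) for name in ("input_ids", "attention_mask", "token_type_ids"))

print(onnx_input_axes("multiple-choice")["input_ids"])  # -> {0: 'batch', 1: 'choice', 2: 'sequence'}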
| 349 | 1 |
'''simple docstring'''
def _lowercase ( __A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = len(__A ), len(grid[0] )
if (
min(__A ,__A ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__UpperCamelCase = 0
count += depth_first_search(__A ,row + 1 ,__A ,__A )
count += depth_first_search(__A ,row - 1 ,__A ,__A )
count += depth_first_search(__A ,__A ,col + 1 ,__A )
count += depth_first_search(__A ,__A ,col - 1 ,__A )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
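# Hedged usage sketch, self-contained because the grid-path counter above is
# defined under the placeholder name `_lowercase` while recursing via
# `depth_first_search`: count simple 4-directional paths between opposite
# corners of a small grid, treating 1-cells as walls and never revisiting a cell.
def count_paths(grid, row, col, visit):
    rows, cols = len(grid), len(grid[0])
    if min(row, col) < 0 or row == rows or col == cols or (row, col) in visit or grid[row][col] == 1:
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visit.add((row, col))
    total = sum(
        count_paths(grid, r, c, visit)
        for r, c in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1))
    )
    visit.remove((row, col))
    return total

print(count_paths([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set()))  # -> 2 (the two border routes)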
| 349 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowercase ( __A ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 )
xgb.fit(__A ,__A )
# Predict target for test data
__UpperCamelCase = xgb.predict(__A )
__UpperCamelCase = predictions.reshape(len(__A ) ,1 )
return predictions
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = fetch_california_housing()
__UpperCamelCase , __UpperCamelCase = data_handling(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split(
__A ,__A ,test_size=0.25 ,random_state=1 )
__UpperCamelCase = xgboost(__A ,__A ,__A )
# Error printing
print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" )
print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
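# A minimal, self-contained sketch of the same fit/predict/score flow on tiny
# synthetic data, so it runs in seconds without downloading the California
# housing dataset (assumes xgboost and scikit-learn are installed).
import numpy as np
from sklearn.metrics import mean_absolute_error
from xgboost import XGBRegressor

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)

model = XGBRegressor(verbosity=0, random_state=42, n_estimators=50)
model.fit(X[:150], y[:150])
preds = model.predict(X[150:])
print("MAE on held-out rows:", mean_absolute_error(y[150:], preds))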
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _lowercase ( __A ,__A ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) )
def _lowercase ( __A ,__A ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__UpperCamelCase = (
"""Wrong input data's dimensions... """
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(__A )
try:
if dataset.shape[1] != value_array.shape[1]:
__UpperCamelCase = (
"""Wrong input data's shape... """
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(__A )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
__UpperCamelCase = (
"""Input data have different datatype... """
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(__A )
__UpperCamelCase = []
for value in value_array:
__UpperCamelCase = euclidean(__A ,dataset[0] )
__UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
__UpperCamelCase = euclidean(__A ,__A )
if dist > temp_dist:
__UpperCamelCase = temp_dist
__UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _lowercase ( __A ,__A ):
'''simple docstring'''
return np.dot(__A ,__A ) / (norm(__A ) * norm(__A ))
if __name__ == "__main__":
import doctest
doctest.testmod()
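# Hedged usage sketch of the nearest-vector search above (the helpers are all
# defined under the placeholder name `_lowercase`, though the body calls
# `euclidean`). Self-contained NumPy equivalent on toy vectors:
import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
query = np.array([0.9, 1.2])

dists = np.linalg.norm(dataset - query, axis=1)  # Euclidean distance to each row
nearest = dataset[np.argmin(dists)]
print(nearest, round(float(dists.min()), 4))     # -> [1. 1.] 0.2236

cos = query @ nearest / (np.linalg.norm(query) * np.linalg.norm(nearest))
print(round(float(cos), 4))                      # cosine similarity close to 1.0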
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder()
__UpperCamelCase = inputs_dict["""input_ids"""]
__UpperCamelCase = input_ids[:1, :]
__UpperCamelCase = inputs_dict["""attention_mask"""][:1, :]
__UpperCamelCase = inputs_dict["""head_mask"""]
__UpperCamelCase = 1
# first forward pass
__UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
__UpperCamelCase , __UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids

__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
__UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase = model(lowercase , attention_mask=lowercase )[0]
__UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
__UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
__SCREAMING_SNAKE_CASE = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__SCREAMING_SNAKE_CASE = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__SCREAMING_SNAKE_CASE = '''google/pegasus-xsum'''
@cached_property
def __lowerCamelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
__UpperCamelCase = self.translate_src_text(**lowercase )
assert self.expected_text == generated_words
def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]:
__UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" )
__UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
__UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )
return generated_words
@slow
def __lowerCamelCase ( self ) -> Dict:
self._assert_generated_batch_equal_expected()
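# Hedged end-to-end sketch of what the integration test above exercises: load
# the public google/pegasus-xsum checkpoint, summarize one article with beam
# search, and decode. Requires TensorFlow plus a one-time weight download.
from transformers import AutoTokenizer, TFPegasusForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")

article = "PG&E scheduled blackouts in response to forecasts for high winds amid dry conditions."
inputs = tokenizer([article], return_tensors="tf", padding=True)
ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True)[0])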
| 349 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
a__ : int = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase_)
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
@dataclass(frozen=UpperCAmelCase_)
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 42
def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = False , ) -> Optional[Any]:
__UpperCamelCase = hans_processors[task]()
__UpperCamelCase = os.path.join(
lowercase , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(lowercase ) , lowercase , ) , )
__UpperCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__UpperCamelCase , __UpperCamelCase = label_list[2], label_list[1]
__UpperCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__UpperCamelCase = cached_features_file + """.lock"""
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
__UpperCamelCase = torch.load(lowercase )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
__UpperCamelCase = (
processor.get_dev_examples(lowercase ) if evaluate else processor.get_train_examples(lowercase )
)
logger.info("""Training examples: %s""" , len(lowercase ) )
__UpperCamelCase = hans_convert_examples_to_features(lowercase , lowercase , lowercase , lowercase )
logger.info("""Saving features into cached file %s""" , lowercase )
torch.save(self.features , lowercase )
def __len__( self ) -> Optional[int]:
return len(self.features )
def __getitem__( self , lowercase ) -> InputFeatures:
return self.features[i]
def __lowerCamelCase ( self ) -> int:
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1_2_8 , lowercase=False , lowercase = False , ) -> Union[str, Any]:
__UpperCamelCase = hans_processors[task]()
__UpperCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__UpperCamelCase , __UpperCamelCase = label_list[2], label_list[1]
__UpperCamelCase = label_list
__UpperCamelCase = processor.get_dev_examples(lowercase ) if evaluate else processor.get_train_examples(lowercase )
__UpperCamelCase = hans_convert_examples_to_features(lowercase , lowercase , lowercase , lowercase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(lowercase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__UpperCamelCase = tf.data.Dataset.from_generator(
lowercase , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __lowerCamelCase ( self ) -> Tuple:
return self.dataset
def __len__( self ) -> Any:
return len(self.features )
def __getitem__( self , lowercase ) -> InputFeatures:
return self.features[i]
def __lowerCamelCase ( self ) -> List[Any]:
return self.label_list
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self , lowercase ) -> Optional[int]:
return self._create_examples(self._read_tsv(os.path.join(lowercase , """heuristics_train_set.txt""" ) ) , """train""" )
def __lowerCamelCase ( self , lowercase ) -> str:
return self._create_examples(self._read_tsv(os.path.join(lowercase , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def __lowerCamelCase ( self ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[Any]:
__UpperCamelCase = []
for i, line in enumerate(lowercase ):
if i == 0:
continue
__UpperCamelCase = """%s-%s""" % (set_type, line[0])
__UpperCamelCase = line[5]
__UpperCamelCase = line[6]
__UpperCamelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
__UpperCamelCase = line[0]
examples.append(InputExample(guid=lowercase , text_a=lowercase , text_b=lowercase , label=lowercase , pairID=lowercase ) )
return examples
def _lowercase ( __A ,__A ,__A ,__A ,):
'''simple docstring'''
__UpperCamelCase = {label: i for i, label in enumerate(__A )}
__UpperCamelCase = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) ,desc="""convert examples to features""" ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
__UpperCamelCase = tokenizer(
example.text_a ,example.text_b ,add_special_tokens=__A ,max_length=__A ,padding="""max_length""" ,truncation=__A ,return_overflowing_tokens=__A ,)
__UpperCamelCase = label_map[example.label] if example.label in label_map else 0
__UpperCamelCase = int(example.pairID )
features.append(InputFeatures(**__A ,label=__A ,pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"guid: {example}" )
logger.info(f"features: {features[i]}" )
return features
a__ : Optional[int] = {
'hans': 3,
}
a__ : List[str] = {
'hans': HansProcessor,
}
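# Hedged sketch of the per-example encoding performed inside the
# features-conversion loop above: a premise/hypothesis pair encoded as one
# padded, truncated sequence, plus the label-to-index mapping (tokenizer
# download required; "bert-base-uncased" is just an illustrative checkpoint).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
enc = tok(
    "The doctor saw the lawyer.",   # text_a (premise)
    "The lawyer saw the doctor.",   # text_b (hypothesis)
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
)
print(len(enc["input_ids"]))  # -> 128

label_map = {label: i for i, label in enumerate(["contradiction", "entailment", "neutral"])}
print(label_map["entailment"])  # -> 1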
| 349 |
'''simple docstring'''
import string
def _lowercase ( __A ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(__A )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = input("""Encrypted message: """ )
__UpperCamelCase = message.upper()
decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
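# Hedged sketch of the brute-force idea (self-contained, since the decrypt
# helper above is defined under the placeholder name `_lowercase`): shifting
# "KHOOR" back by key 3 recovers "HELLO"; the loop above simply prints this
# shift for every possible key.
import string

def shift_back(message: str, key: int) -> str:
    out = ""
    for symbol in message:
        if symbol in string.ascii_uppercase:
            out += string.ascii_uppercase[(string.ascii_uppercase.find(symbol) - key) % 26]
        else:
            out += symbol
    return out

print(shift_back("KHOOR", 3))  # -> HELLO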
| 349 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Any = {'vocab_file': 'vocab.txt'}
a__ : Optional[Any] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
a__ : List[Any] = {
'openbmb/cpm-ant-10b': 1_0_2_4,
}
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = collections.OrderedDict()
with open(__A ,"""r""" ,encoding="""utf-8""" ) as reader:
__UpperCamelCase = reader.readlines()
for index, token in enumerate(__A ):
__UpperCamelCase = token.rstrip("""\n""" )
__UpperCamelCase = index
return vocab
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase="<unk>" , lowercase=2_0_0 ) -> Dict:
__UpperCamelCase = vocab
__UpperCamelCase = unk_token
__UpperCamelCase = max_input_chars_per_word
def __lowerCamelCase ( self , lowercase ) -> Optional[int]:
__UpperCamelCase = list(lowercase )
if len(lowercase ) > self.max_input_chars_per_word:
return [self.unk_token]
__UpperCamelCase = 0
__UpperCamelCase = []
while start < len(lowercase ):
__UpperCamelCase = len(lowercase )
__UpperCamelCase = None
while start < end:
__UpperCamelCase = """""".join(chars[start:end] )
if substr in self.vocab:
__UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowercase )
__UpperCamelCase = end
return sub_tokens
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE = False
def __init__( self , lowercase , lowercase="<d>" , lowercase="</d>" , lowercase="<s>" , lowercase="</s>" , lowercase="<pad>" , lowercase="<unk>" , lowercase="</n>" , lowercase="</_>" , lowercase="left" , **lowercase , ) -> Tuple:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=lowercase , eod_token=lowercase , bos_token=lowercase , eos_token=lowercase , pad_token=lowercase , unk_token=lowercase , line_token=lowercase , space_token=lowercase , padding_side=lowercase , **lowercase , )
__UpperCamelCase = bod_token
__UpperCamelCase = eod_token
__UpperCamelCase = load_vocab(lowercase )
__UpperCamelCase = self.encoder[space_token]
__UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase : x[1] ) )
__UpperCamelCase = {v: k for k, v in self.encoder.items()}
__UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __lowerCamelCase ( self ) -> List[str]:
return self.encoder[self.bod_token]
@property
def __lowerCamelCase ( self ) -> Union[str, Any]:
return self.encoder[self.eod_token]
@property
def __lowerCamelCase ( self ) -> Tuple:
return self.encoder["\n"]
@property
def __lowerCamelCase ( self ) -> int:
return len(self.encoder )
def __lowerCamelCase ( self ) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , lowercase ) -> List[Any]:
__UpperCamelCase = []
for x in jieba.cut(lowercase , cut_all=lowercase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowercase ) )
return output_tokens
def __lowerCamelCase ( self , lowercase , **lowercase ) -> Union[str, Any]:
__UpperCamelCase = [i for i in token_ids if i >= 0]
__UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase ) -> int:
return token in self.encoder
def __lowerCamelCase ( self , lowercase ) -> str:
return "".join(lowercase )
def __lowerCamelCase ( self , lowercase ) -> Dict:
return self.encoder.get(lowercase , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , lowercase ) -> str:
return self.decoder.get(lowercase , self.unk_token )
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
if os.path.isdir(lowercase ):
__UpperCamelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__UpperCamelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
__UpperCamelCase = 0
if " " in self.encoder:
__UpperCamelCase = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
__UpperCamelCase = self.encoder["""\n"""]
del self.encoder["\n"]
__UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase : x[1] ) )
with open(lowercase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
__UpperCamelCase = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __lowerCamelCase ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is not None:
return [1] + ([0] * len(lowercase )) + [1] + ([0] * len(lowercase ))
return [1] + ([0] * len(lowercase ))
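# Hedged sketch of the greedy longest-match loop in WordpieceTokenizer above
# (self-contained, since the method names in this dump are obfuscated to
# `__lowerCamelCase`): try the longest substring first, shrink from the right,
# and emit the unknown token one character at a time when nothing matches.
def greedy_wordpiece(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:                 # no vocabulary entry matches here
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

print(greedy_wordpiece("hello", {"he", "llo", "hello"}))  # -> ['hello']
print(greedy_wordpiece("hell", {"he", "llo", "hello"}))   # -> ['he', '<unk>', '<unk>']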
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Dict = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''gptj'''
__SCREAMING_SNAKE_CASE = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple:
__UpperCamelCase = vocab_size
__UpperCamelCase = n_positions
__UpperCamelCase = n_embd
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
__UpperCamelCase = n_inner
__UpperCamelCase = rotary_dim
__UpperCamelCase = activation_function
__UpperCamelCase = resid_pdrop
__UpperCamelCase = embd_pdrop
__UpperCamelCase = attn_pdrop
__UpperCamelCase = layer_norm_epsilon
__UpperCamelCase = initializer_range
__UpperCamelCase = use_cache
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
super().__init__(
bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]:
super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase )
if not getattr(self._config , """pad_token_id""" , lowercase ):
# TODO: how to do that better?
__UpperCamelCase = 0
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
__UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction="""inputs""" )
__UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __lowerCamelCase ( self ) -> int:
return self._config.n_layer
@property
def __lowerCamelCase ( self ) -> int:
return self._config.n_head
def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
__UpperCamelCase = super(lowercase , self ).generate_dummy_inputs(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
# We need to order the input in the way they appears in the forward()
__UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__UpperCamelCase = seqlen + 2
__UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCamelCase = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers )
]
__UpperCamelCase = common_inputs["""attention_mask"""]
if self.use_past:
__UpperCamelCase = ordered_inputs["""attention_mask"""].dtype
__UpperCamelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
return ordered_inputs
@property
def __lowerCamelCase ( self ) -> int:
return 1_3
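# Hedged sketch of the dummy past_key_values built in generate_dummy_inputs
# above: one (key, value) pair of zeros per layer, each shaped
# (batch, num_heads, past_sequence_length, head_dim). GPT-J-6B-like sizes;
# requires torch.
import torch

batch, seqlen = 2, 8
n_layer, n_head, n_embd = 28, 16, 4096
past_len = seqlen + 2
shape = (batch, n_head, past_len, n_embd // n_head)

past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
print(len(past_key_values), past_key_values[0][0].shape)  # -> 28 torch.Size([2, 16, 10, 256])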
| 349 | 1 |
'''simple docstring'''
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = int(__A )
if decimal in (0, 1): # Exit cases for the recursion
return str(__A )
__UpperCamelCase , __UpperCamelCase = divmod(__A ,2 )
return binary_recursive(__A ) + str(__A )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(__A ).strip()
if not number:
raise ValueError("""No input value was provided""" )
__UpperCamelCase = """-""" if number.startswith("""-""" ) else """"""
__UpperCamelCase = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return f"{negative}0b{binary_recursive(int(__A ) )}"
if __name__ == "__main__":
from doctest import testmod
testmod()
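# Hedged usage sketch: the wrapper above is defined under the placeholder name
# `_lowercase` and delegates to `binary_recursive`, which repeatedly takes
# divmod(n, 2) and appends the remainders. Self-contained equivalent:
def binary_recursive(decimal: int) -> str:
    if decimal in (0, 1):
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)

print(binary_recursive(11))            # -> "1011"
print(f"-0b{binary_recursive(37)}")    # sign handled by the wrapper -> "-0b100101"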
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ : int = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['LayoutLMv3FeatureExtractor']
a__ : str = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
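# Hedged sketch of the deferred-import pattern behind `_LazyModule`: attribute
# access, not module import, is what triggers loading the heavy submodule.
# Generic stand-in for illustration, not the transformers implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name):
        module = importlib.import_module(self._class_to_module[name])  # imported on first access
        return getattr(module, name)

lazy = LazyModule("demo", {"json": ["JSONDecoder"]})
print(lazy.JSONDecoder)  # the json module is imported only at this line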
| 349 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowercase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__A ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _lowercase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _lowercase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__A ):
http_head("""https://huggingface.co""" )
| 349 |
'''simple docstring'''
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = len(__A )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each prefix of arr, a sum of zero (0) can be formed by taking no elements,
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 ,required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
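# Hedged, self-contained version of the DP above (the original binds every
# intermediate name to the placeholder `__UpperCamelCase`, so it is restated
# here runnable): subset[i][j] is True iff some subset of the first i values
# sums to j.
def is_subset_sum(arr, required_sum):
    n = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):  # the empty subset always reaches sum 0
        subset[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j] or (arr[i - 1] <= j and subset[i - 1][j - arr[i - 1]])
    return subset[n][required_sum]

print(is_subset_sum([3, 34, 4, 12, 5, 2], 9))   # -> True  ({4, 5})
print(is_subset_sum([3, 34, 4, 12, 5, 2], 30))  # -> False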
| 349 | 1 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a__ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , lowercase = None ) -> List[str]:
__UpperCamelCase = (
os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__UpperCamelCase = Extractor
def __lowerCamelCase ( self , lowercase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
__UpperCamelCase = os.path.abspath(lowercase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> bool:
return force_extract or (
not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase ))
)
def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str:
__UpperCamelCase = self.extractor.infer_extractor_format(lowercase )
if not extractor_format:
return input_path
__UpperCamelCase = self._get_output_path(lowercase )
if self._do_extract(lowercase , lowercase ):
self.extractor.extract(lowercase , lowercase , lowercase )
return output_path
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
...
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = []
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> int:
with open(lowercase , """rb""" ) as f:
return f.read(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if not magic_number:
__UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers )
try:
__UpperCamelCase = cls.read_magic_number(lowercase , lowercase )
except OSError:
return False
return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
return tarfile.is_tarfile(lowercase )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
def resolved(lowercase ) -> str:
return os.path.realpath(os.path.abspath(lowercase ) )
def badpath(lowercase , lowercase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase )
def badlink(lowercase , lowercase ) -> bool:
# Links are interpreted relative to the directory containing the link
__UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowercase )
__UpperCamelCase = resolved(lowercase )
for finfo in members:
if badpath(finfo.name , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = tarfile.open(lowercase )
tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x1F\x8B''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with gzip.open(lowercase , """rb""" ) as gzip_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if super().is_extractable(lowercase , magic_number=lowercase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase , """rb""" ) as fp:
__UpperCamelCase = _EndRecData(lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be
if len(lowercase ) == sizeCentralDir:
__UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
with zipfile.ZipFile(lowercase , """r""" ) as zip_file:
zip_file.extractall(lowercase )
zip_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with lzma.open(lowercase ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowercase , exist_ok=lowercase )
        rf = rarfile.RarFile(lowercase )
rf.extractall(lowercase )
rf.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh:
dctx.copy_stream(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with bza.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(lowercase , exist_ok=lowercase )
        with py7zr.SevenZipFile(lowercase , """r""" ) as archive:
archive.extractall(lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ :
    # Put zip last: archives in other formats (e.g. tar or gzip) can be wrongly detected as zip
__SCREAMING_SNAKE_CASE = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls ) -> Union[str, Any]:
return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
try:
return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowercase , )
        extractor_format = cls.infer_extractor_format(lowercase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/>
__UpperCamelCase = cls._get_magic_number_max_length()
__UpperCamelCase = cls._read_magic_number(lowercase , lowercase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowercase , magic_number=lowercase ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase )
# Prevent parallel extractions
        lock_path = str(Path(lowercase ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
shutil.rmtree(lowercase , ignore_errors=lowercase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowercase , )
                extractor = extractor if extractor != """deprecated""" else extractor_format
else:
                extractor = cls.extractors[extractor_format]
return extractor.extract(lowercase , lowercase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowercase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowercase ):
return extractor.extract(lowercase , lowercase )
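# Hedged usage sketch (hypothetical path, not part of the original file; assumes the
# dispatcher class above is exported as `Extractor`): the intended flow is roughly
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")   # e.g. "gzip"
#   Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)
#
# where detection is driven by magic numbers rather than file extensions.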
| 349 | 1 |
'''simple docstring'''
from math import factorial
def _lowercase ( __A = 20 ):
    '''simple docstring'''
    n = 2 * __A  # the answer is the middle entry of row 2n of Pascal's triangle,
    # i.e. the central binomial coefficient C(2n, n)
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
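# Comment-only derivation (added for clarity, not original code): the middle entry of
# row 2n of Pascal's triangle is the central binomial coefficient
#   C(2n, n) = (2n)! / (n! * n!),
# which also counts monotone lattice paths through an n x n grid. For the default
# n = 20 this gives C(40, 20) = 137846528820.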
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(_lowercase(2_0))
    else:
        try:
            a__ : List[Any] = int(sys.argv[1])
            print(_lowercase(a__))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 349 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU.
    # Setting the XLA allocator to "platform" makes JAX allocate exactly what is needed on demand
    # and deallocate memory that is no longer needed, but it is slower, as stated here:
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
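def _demo_padding_mask() -> None:
    # Hedged sketch (hypothetical ids, not part of the original tests): the mask rule the
    # helper above applies, assuming pad_token_id = 0.
    import numpy as np
    ids = np.array([[5, 7, 0, 0]])
    mask = np.not_equal(ids, 0).astype(np.int8)
    assert mask.tolist() == [[1, 1, 0, 0]]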
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a__ : List[str] = logging.getLogger(__name__)
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase=-1 ) -> Optional[int]:
# in NER datasets, the last column is usually reserved for NER label
__UpperCamelCase = label_idx
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[InputExample]:
if isinstance(lowercase , lowercase ):
__UpperCamelCase = mode.value
__UpperCamelCase = os.path.join(lowercase , f"{mode}.txt" )
__UpperCamelCase = 1
__UpperCamelCase = []
with open(lowercase , encoding="""utf-8""" ) as f:
__UpperCamelCase = []
__UpperCamelCase = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=lowercase , labels=lowercase ) )
guid_index += 1
__UpperCamelCase = []
__UpperCamelCase = []
else:
__UpperCamelCase = line.split(""" """ )
words.append(splits[0] )
if len(lowercase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=lowercase , labels=lowercase ) )
return examples
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__UpperCamelCase = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
if path:
with open(lowercase , """r""" ) as f:
__UpperCamelCase = f.read().splitlines()
if "O" not in labels:
__UpperCamelCase = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self ) -> Optional[Any]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
if path:
with open(lowercase , """r""" ) as f:
__UpperCamelCase = f.read().splitlines()
if "O" not in labels:
__UpperCamelCase = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCAmelCase__ ( UpperCAmelCase_):
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[InputExample]:
if isinstance(lowercase , lowercase ):
__UpperCamelCase = mode.value
__UpperCamelCase = os.path.join(lowercase , f"{mode}.txt" )
__UpperCamelCase = 1
__UpperCamelCase = []
with open(lowercase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase ):
__UpperCamelCase = []
__UpperCamelCase = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase ) == len(lowercase )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=lowercase , labels=lowercase ) )
guid_index += 1
return examples
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> List[str]:
__UpperCamelCase = 0
for sentence in parse_incr(lowercase ):
__UpperCamelCase = preds_list[example_id]
__UpperCamelCase = """"""
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(lowercase )
example_id += 1
def __lowerCamelCase ( self , lowercase ) -> List[str]:
if path:
with open(lowercase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 349 |
'''simple docstring'''
import pytest
a__ : List[str] = '__dummy_dataset1__'
a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = dataset_loading_script_name
__UpperCamelCase = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=__A )
__UpperCamelCase = script_dir / f"{script_name}.py"
with open(__A ,"""w""" ) as f:
f.write(__A )
return str(__A )
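# Hedged usage sketch (hypothetical test, not part of the original file; assumes the
# fixtures above keep their conventional names): pytest wires fixtures by parameter name:
#
#   def test_script_is_written(dataset_loading_script_dir, dataset_loading_script_name):
#       script = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
#       assert os.path.isfile(script)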
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
a__ : int = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
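# Hedged note (illustrative comment, not part of the original file): with `_LazyModule`,
# importing this package stays cheap; the tokenizer modules above are only imported on
# first attribute access, e.g.
#
#   from transformers.models import herbert
#   tok_cls = herbert.HerbertTokenizer   # the real import happens here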
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a__ : Any = logging.get_logger(__name__)
a__ : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
a__ : List[str] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase ( __A ):
    '''simple docstring'''
    result = {}
    with open(__A ,"""r""" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = getattr(__A ,__A ).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase = value[0]
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = """.""".join([key, hf_param_name] )
else:
__UpperCamelCase = key
__UpperCamelCase = value if """lm_head""" in full_key else value[0]
a__ : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase ( __A ,__A ,__A=None ,__A=None ):
'''simple docstring'''
__UpperCamelCase = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(__A )[0].split(""".""" )[-2]
__UpperCamelCase = mapped_key.replace("""*""" ,__A )
if "weight_g" in name:
__UpperCamelCase = """weight_g"""
elif "weight_v" in name:
__UpperCamelCase = """weight_v"""
elif "bias" in name:
__UpperCamelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = """weight"""
else:
__UpperCamelCase = None
if hf_dict is not None:
rename_dict(__A ,__A ,__A ,__A ,__A )
else:
set_recursively(__A ,__A ,__A ,__A ,__A )
return is_used
return is_used
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,)
__UpperCamelCase = True
else:
__UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A )
if not is_used:
unused_weights.append(__A )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = full_name.split("""conv_layers.""" )[-1]
__UpperCamelCase = name.split(""".""" )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__A )
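def _demo_conv_name_parse() -> None:
    # Hedged sketch (hypothetical weight name, not part of the original script): how
    # `load_conv_layer` above derives the layer and type ids from a fairseq name.
    full_name = "feature_extractor.conv_layers.2.0.weight"
    items = full_name.split("conv_layers.")[-1].split(".")
    assert (int(items[0]), int(items[1])) == (2, 0)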
@torch.no_grad()
def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(__A )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(__A )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(__A )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(__A ,"""vocab.json""" )
if not os.path.isdir(__A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) )
return
os.makedirs(__A ,exist_ok=__A )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(__A ,__A )
__UpperCamelCase = WavaVecaCTCTokenizer(
__A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,)
__UpperCamelCase = True if config.feat_extract_norm == """layer""" else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
__UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A )
processor.save_pretrained(__A )
__UpperCamelCase = WavaVecaForCTC(__A )
else:
__UpperCamelCase = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__UpperCamelCase = fairseq.tasks.setup_task(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A )
__UpperCamelCase = model[0].eval()
recursively_load_weights(__A ,__A ,not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
a__ : Optional[int] = parser.parse_args()
a__ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
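# Hedged usage sketch (hypothetical paths and script name, not part of the original file):
# a typical invocation of the converter above is roughly
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned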
| 349 | 1 |
def _a ( num :int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
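def _demo_string_palindrome(num: int) -> bool:
    # Hedged alternative sketch (not part of the original file): the same check via string
    # reversal; easier to read, at the cost of building the intermediate string.
    return num >= 0 and str(num) == str(num)[::-1]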
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
def __init__( self , lowercase , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 9_9
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 3_7
__UpperCamelCase = """gelu"""
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
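def _demo_multiple_choice_tiling() -> None:
    # Hedged sketch (hypothetical shapes, not part of the original tests; assumes
    # TensorFlow is installed): the expand-and-tile trick used above to replicate
    # (batch, seq) inputs across the choice dimension.
    import tensorflow as tf
    ids = tf.constant([[1, 2, 3]])                      # (batch=1, seq=3)
    tiled = tf.tile(tf.expand_dims(ids, 1), (1, 4, 1))  # (batch, num_choices=4, seq)
    assert tiled.shape == (1, 4, 3)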
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )
def __lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def __lowerCamelCase ( self ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
| 349 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
UpperCAmelCase_ = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
UpperCAmelCase_ = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
UpperCAmelCase_ = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
UpperCAmelCase_ = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
UpperCAmelCase_ = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
UpperCAmelCase_ = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
UpperCAmelCase_ = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
UpperCAmelCase_ = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
UpperCAmelCase_ = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
UpperCAmelCase_ = key.replace("image_encoder.module" , "flava.image_model" )
UpperCAmelCase_ = key.replace("text_encoder.module" , "flava.text_model" )
UpperCAmelCase_ = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
UpperCAmelCase_ = key.replace("mm_encoder.module" , "flava.multimodal_model" )
UpperCAmelCase_ = key.replace("text_projection" , "flava.text_projection" )
UpperCAmelCase_ = key.replace("image_projection" , "flava.image_projection" )
UpperCAmelCase_ = value.float()
for key, value in codebook_state_dict.items():
UpperCAmelCase_ = value
return upgrade
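def _demo_key_rename() -> None:
    # Hedged sketch (hypothetical key, not part of the original script): the chained
    # `str.replace` scheme the upgrade function above uses to map checkpoint keys.
    key = "text_encoder.module.encoder.layer.0.weight"
    assert key.replace("text_encoder.module", "flava.text_model") == (
        "flava.text_model.encoder.layer.0.weight"
    )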
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : str=None ) -> Tuple:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = FlavaConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = FlavaConfig()
UpperCAmelCase_ = FlavaForPreTraining(snake_case_ ).eval()
UpperCAmelCase_ = convert_dalle_checkpoint(snake_case_ , snake_case_ , save_checkpoint=snake_case_ )
if os.path.exists(snake_case_ ):
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )
else:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , map_location="cpu" )
UpperCAmelCase_ = upgrade_state_dict(snake_case_ , snake_case_ )
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = hf_model.state_dict()
UpperCAmelCase_ = count_parameters(snake_case_ )
UpperCAmelCase_ = count_parameters(snake_case_ ) + count_parameters(snake_case_ )
assert torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean ( input_a ,input_b ):
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(input_a ,input_b ) ) )
def similarity_search ( dataset ,value_array ):
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )
    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatypes... """
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value ,dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value ,dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity ( input_a ,input_b ):
    '''simple docstring'''
    return np.dot(input_a ,input_b ) / (norm(input_a ) * norm(input_b ))
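def _demo_cosine() -> None:
    # Hedged sketch (hypothetical vectors, not part of the original file): the cosine of
    # (1, 0) and (1, 1) is 1/sqrt(2).
    vec_a = np.array([1.0, 0.0])
    vec_b = np.array([1.0, 1.0])
    assert abs(cosine_similarity(vec_a, vec_b) - 1 / math.sqrt(2)) < 1e-9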
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = """time_series_transformer"""
lowerCAmelCase__ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__(self : Any , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "student_t" , UpperCamelCase : str = "nll" , UpperCamelCase : int = 1 , UpperCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase : Optional[Union[str, bool]] = "mean" , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : int = 32 , UpperCamelCase : int = 32 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : bool = True , UpperCamelCase : str = "gelu" , UpperCamelCase : int = 64 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : int = 100 , UpperCamelCase : float = 0.02 , UpperCamelCase : Tuple=True , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
lowercase__ = prediction_length
lowercase__ = context_length or prediction_length
lowercase__ = distribution_output
lowercase__ = loss
lowercase__ = input_size
lowercase__ = num_time_features
lowercase__ = lags_sequence
lowercase__ = scaling
lowercase__ = num_dynamic_real_features
lowercase__ = num_static_real_features
lowercase__ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = cardinality
else:
lowercase__ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = embedding_dimension
else:
lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ = num_parallel_samples
# Transformer architecture configuration
lowercase__ = input_size * len(UpperCamelCase ) + self._number_of_features
lowercase__ = d_model
lowercase__ = encoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = encoder_ffn_dim
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = decoder_layers
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = use_cache
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
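def _demo_default_embedding_dims() -> None:
    # Hedged sketch (hypothetical cardinalities, not part of the original file): the default
    # rule above for static categorical embedding sizes, min(50, (cat + 1) // 2).
    cardinality = [3, 1000]
    assert [min(50, (cat + 1) // 2) for cat in cardinality] == [2, 50]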
| 2 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video ( url ):
    '''simple docstring'''
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase : List[str] = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
'''simple docstring'''
A : Optional[Any] = bnb_quantization_config.load_in_abit
A : Tuple = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
A : Optional[Any] = []
# custom device map
if isinstance(snake_case__ , snake_case__ ) and len(device_map.keys() ) > 1:
A : str = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : Optional[int] = get_keys_to_not_convert(snake_case__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__ )
A : Dict = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Any = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__ )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Dict = load_in_abit
A : Tuple = get_parameter_device(snake_case__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
# convert param to the right dtype
A : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
        if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
            param.to(torch.float32 )
            if param.dtype != torch.float32:
                name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                param = getattr(model , name , None )
                if param is not None:
                    param.to(torch.float32 )
        elif torch.is_floating_point(param ):
            param.to(bnb_quantization_config.torch_dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
            F'The model device type is {model_device.type}. However, cuda is needed for quantization. '
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
    return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
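# Illustrative sketch (added): how a quantization config for the helper above is
# typically assembled. `BnbQuantizationConfig` is the dataclass imported at the top
# of this module; the field values below are example choices, not prescribed defaults.
def _example_bnb_config():
    return BnbQuantizationConfig(
        load_in_8bit=True,  # quantize linear layers to 8-bit via bitsandbytes
        llm_int8_threshold=6.0,  # outlier threshold used by the int8 matmul
    )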
def get_quantized_model_device_map ( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
    special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
    kwargs = {}
    kwargs['''special_dtypes'''] = special_dtypes
    kwargs['''no_split_module_classes'''] = no_split_module_classes
    kwargs['''dtype'''] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : List[Any] = get_balanced_memory(
snake_case__ , low_zero=(device_map == '''balanced_low_0''') , max_memory=snake_case__ , **snake_case__ , )
A : Dict = max_memory
A : Optional[int] = infer_auto_device_map(snake_case__ , **snake_case__ )
    if isinstance(device_map , dict ):
# check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
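# Illustrative sketch (added): a hand-written device map of the kind the helper above
# validates. The module names ("transformer.h.0", "lm_head") are made up for the example.
def _example_custom_device_map():
    return {
        "transformer.h.0": 0,      # first block on GPU 0
        "transformer.h.1": "cpu",  # CPU offload (8-bit only, per the check above)
        "lm_head": "disk",         # disk offload
    }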
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
'''simple docstring'''
if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
'''simple docstring'''
    has_been_replaced = False
for name, module in model.named_children():
if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
                    proceed = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
        _ , _has_been_replaced = _replace_with_bnb_layers(
            module , bnb_quantization_config , modules_to_not_convert , current_key_name )
        has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
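# Illustrative sketch (added): the recursive named_children()/setattr pattern used by
# _replace_with_bnb_layers above, demonstrated with a plain nn.Identity swap so it
# runs without bitsandbytes or a GPU.
def _example_replace_linears_with_identity(model: nn.Module) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Linear):
            setattr(model, name, nn.Identity())
        else:
            _example_replace_linears_with_identity(child)
    return model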
def get_keys_to_not_convert ( model ):
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
return filtered_module_names
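# Illustrative sketch (added): the ".weight"/".bias" suffix stripping performed at the
# end of the helper above, isolated so the transformation is easy to see.
def _example_strip_param_suffixes(names=("lm_head.weight", "lm_head.bias")):
    stripped = []
    for name in names:
        for suffix in (".weight", ".bias"):
            name = name.replace(suffix, "")
        stripped.append(name)
    return stripped  # -> ["lm_head", "lm_head"]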
def has_4bit_bnb_layers ( model ):
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device ( parameter ):
    '''simple docstring'''
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit ( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F'{module} has no attribute {split}.' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 3 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key ,default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no." )
    return _value
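# Illustrative sketch (added): the strtobool-based parsing used above accepts the
# usual spellings of truthy/falsy environment values.
def _example_parse_flag(value: str = "yes") -> bool:
    return bool(strtobool(value))  # "yes"/"true"/"1" -> True, "no"/"false"/"0" -> False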
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a__ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a__ : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a__ : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a__ : Tuple = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def _lowercase ( __A ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires faiss""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires regex""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TF_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires JAX""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def _lowercase ( __A ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__UpperCamelCase = unittest.skip("""test is slow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__UpperCamelCase = unittest.skip("""test is local""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__UpperCamelCase = unittest.skip("""test is packaged""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__UpperCamelCase = unittest.skip("""test requires remote""" )(__A )
return test_case
def _lowercase ( *decorators ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("""test""" ):
for decorator in decorators:
__UpperCamelCase = decorator(__A )
setattr(cls ,__A ,__A )
return cls
return decorate
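# Illustrative sketch (added): the test-decorating pattern the factory above produces,
# written out directly for a toy TestCase.
def _example_decorate_all_tests():
    class _Demo(unittest.TestCase):
        def test_noop(self):
            pass
    for name, fn in list(_Demo.__dict__.items()):
        if callable(fn) and name.startswith("test"):
            setattr(_Demo, name, unittest.skip("demo only")(fn))
    return _Demo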
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline ( mode=OfflineSimulationMode.CONNECTION_FAILS ,timeout=1E-16 ):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session ,method ,url ,**kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method ,invalid_url ,**kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session ,request ,**kwargs ):
        raise requests.ConnectionError("""Offline mode is enabled.""" ,request=request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""" ,raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""" ,timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,True ):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
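# Illustrative sketch (added): the unittest.mock.patch technique the offline simulator
# above relies on, applied to a harmless target so it runs anywhere.
def _example_patch_in_context():
    with patch("os.getcwd", lambda: "/patched"):
        return os.getcwd()  # -> "/patched"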
@contextmanager
def _lowercase ( *args ,**kwargs ):
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args ,**kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowercase ( rng1 ,rng2 ):
    '''simple docstring'''
    return deepcopy(rng1 ).integers(0 ,100 ,10 ).tolist() == deepcopy(rng2 ).integers(0 ,100 ,10 ).tolist()
def _lowercase ( __A ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
    def _wrapper(func ,*args ,**kwargs ):
        try:
            return func(*args ,**kwargs )
except HTTPError as err:
if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper ,__A )
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream ,callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd ,env=None ,stdin=None ,timeout=None ,quiet=False ,echo=False ):
    '''simple docstring'''
    if echo:
        print("""\nRunning: """ ,""" """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] ,*cmd[1:] ,stdin=stdin ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=env ,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line ,sink ,pipe ,label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label ,line ,file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout ,lambda l : tee(l ,out ,sys.stdout ,label="""stdout:""" ) ),
            _read_stream(p.stderr ,lambda l : tee(l ,err ,sys.stderr ,label="""stderr:""" ) ),
        ] ,timeout=timeout ,)
    return _RunOutput(await p.wait() ,out ,err )
def execute_subprocess_async ( cmd ,env=None ,stdin=None ,timeout=180 ,quiet=False ,echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output." )
return result
def pytest_xdist_worker_id ( ):
    '''simple docstring'''
    worker = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" )
    worker = re.sub(R"""^gw""" ,"""""" ,worker ,0 ,re.M )
    return int(worker )
def _lowercase ( ):
    '''simple docstring'''
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
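# Illustrative sketch (added): the worker-id arithmetic performed by the two helpers
# above, for a hypothetical pytest-xdist worker name.
def _example_worker_port(worker: str = "gw3", base_port: int = 29_500) -> int:
    return base_port + int(re.sub(r"^gw", "", worker))  # -> 29_503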
| 349 | 0 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum ( number : int ):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution ( ):
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
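# Illustrative sketch (added): 4150 is one of the numbers the search above finds,
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
def _example_check_4150() -> bool:
    return sum(int(digit) ** 5 for digit in str(4150)) == 4150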
if __name__ == "__main__":
print(solution())
| 4 |
'''simple docstring'''
import re
def split_input ( str_ ):
'''simple docstring'''
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,str_ )]
def to_simple_case ( str_ ):
    '''simple docstring'''
    string_split = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case ( text ,upper ,separator ):
    '''simple docstring'''
    try:
        string_split = split_input(text )
if upper:
__UpperCamelCase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__UpperCamelCase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowercase ( __A ):
'''simple docstring'''
return to_simple_case(__A )
def _lowercase ( __A ):
'''simple docstring'''
try:
        res_str = to_simple_case(__A )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""_""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""-""" )
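# Illustrative sketch (added): the split-then-join technique behind the converters
# above, inlined for a single snake_case example.
def _example_snake_case(text: str = "Hello World example") -> str:
    return "_".join(word.lower() for word in text.split())  # -> "hello_world_example"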
if __name__ == "__main__":
__import__('doctest').testmod()
| 349 | 0 |
def UpperCAmelCase_ ( equation1 , equation2 ) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
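# Illustrative sketch (added): a worked check of the Cramer's rule math above for
# x + 2y = 3 and 4x + 5y = 6, whose solution is (x, y) = (-1.0, 2.0).
def _example_cramers_check() -> bool:
    determinant = 1 * 5 - 4 * 2    # -3
    determinant_x = 3 * 5 - 6 * 2  # 3
    determinant_y = 1 * 6 - 4 * 3  # -6
    return (determinant_x / determinant, determinant_y / determinant) == (-1.0, 2.0)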
| 5 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = 1
__UpperCamelCase = 3
__UpperCamelCase = (3_2, 3_2)
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase )
return image
@property
def __lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
return model
@property
def __lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowercase )
@property
def __lowerCamelCase ( self ) -> Tuple:
def extract(*lowercase , **lowercase ):
class UpperCAmelCase__ :
def __init__( self ) -> Tuple:
__UpperCamelCase = torch.ones([0] )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
self.pixel_values.to(lowercase )
return self
return Out()
return extract
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
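# Illustrative sketch (added): the slice-and-compare assertion pattern the tests above
# use, on synthetic data so it runs without any pipeline or GPU.
def _example_image_slice_check() -> None:
    image = np.zeros((1, 64, 64, 3), dtype=np.float32)
    expected_slice = np.zeros(9, dtype=np.float32)
    image_slice = image[0, -3:, -3:, -1]  # 3x3 corner of the last channel
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2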
| 349 | 0 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> np.ndarray:
__a = cva.getAffineTransform(a__ , a__ )
return cva.warpAffine(a__ , a__ , (rows, cols) )
if __name__ == "__main__":
# read original image
A : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A : Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A , A : List[Any] = gray_img.shape
# set different points to rotate image
A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
A : Tuple = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
A : Union[str, Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
A : Any = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
A : Optional[int] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A : Optional[int] = plt.figure(1)
A : Tuple = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show() | 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=DummyObject):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
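# Illustrative sketch (added): a minimal metaclass showing how DummyObject-style
# placeholders can fail loudly when a backend is missing (assumption: the real
# DummyObject in the library differs in detail).
class _ExampleDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the `flax` backend, which is not installed.")
class _ExampleFlaxPlaceholder(metaclass=_ExampleDummyMeta):
    pass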
| 349 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A :
"""simple docstring"""
def __init__( self : str,lowercase_ : List[str],lowercase_ : Optional[int]=1_3,lowercase_ : Union[str, Any]=7,lowercase_ : str=True,lowercase_ : Tuple=True,lowercase_ : Any=True,lowercase_ : Optional[Any]=True,lowercase_ : Union[str, Any]=9_9,lowercase_ : Any=6_4,lowercase_ : Optional[int]=3_2,lowercase_ : Union[str, Any]=5,lowercase_ : str=4,lowercase_ : str=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Optional[int]=5_1_2,lowercase_ : int=1_6,lowercase_ : Dict=2,lowercase_ : Dict=0.02,lowercase_ : List[str]=3,lowercase_ : List[str]=4,lowercase_ : List[str]=None,)-> Dict:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = embedding_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def snake_case__ ( self : int )-> Dict:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = ids_tensor([self.batch_size],self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,embedding_size=self.embedding_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,)
def snake_case__ ( self : Union[str, Any],lowercase_ : int,lowercase_ : List[str],lowercase_ : Tuple,lowercase_ : Any,lowercase_ : Any,lowercase_ : int,lowercase_ : Dict )-> Dict:
'''simple docstring'''
A__ = MegatronBertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )
def snake_case__ ( self : List[Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Optional[Any],lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
A__ = MegatronBertForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : List[Any],lowercase_ : Tuple,lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple,lowercase_ : Dict,lowercase_ : List[str],lowercase_ : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = MegatronBertForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[Any],lowercase_ : Optional[Any],lowercase_ : Union[str, Any],lowercase_ : Tuple,lowercase_ : str,lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : List[str] )-> Any:
'''simple docstring'''
A__ = MegatronBertForNextSentencePrediction(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, 2) )
def snake_case__ ( self : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str],lowercase_ : Any,lowercase_ : Any,lowercase_ : Optional[Any],lowercase_ : List[Any],lowercase_ : Dict )-> Union[str, Any]:
'''simple docstring'''
A__ = MegatronBertForPreTraining(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_,next_sentence_label=lowercase_,)
self.parent.assertEqual(result.prediction_logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape,(self.batch_size, 2) )
def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : Optional[int],lowercase_ : List[Any],lowercase_ : Optional[int],lowercase_ : List[Any],lowercase_ : str,lowercase_ : Dict )-> Dict:
'''simple docstring'''
A__ = MegatronBertForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,start_positions=lowercase_,end_positions=lowercase_,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : Dict,lowercase_ : Optional[int],lowercase_ : Any,lowercase_ : Optional[int],lowercase_ : Optional[Any],lowercase_ : Any )-> Optional[Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = MegatronBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : Any,lowercase_ : Dict,lowercase_ : Optional[int],lowercase_ : Union[str, Any],lowercase_ : Tuple,lowercase_ : Optional[Any],lowercase_ : str,lowercase_ : Tuple )-> int:
'''simple docstring'''
A__ = self.num_labels
A__ = MegatronBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Any,lowercase_ : Any,lowercase_ : Union[str, Any],lowercase_ : Tuple,lowercase_ : int,lowercase_ : Optional[Any],lowercase_ : Dict,lowercase_ : Any )-> Any:
'''simple docstring'''
A__ = self.num_choices
A__ = MegatronBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
A__ = token_type_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
A__ = model(
lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = True
# test_resize_embeddings = False
lowerCamelCase = False
def snake_case__ ( self : List[Any],lowercase_ : Any,lowercase_ : List[Any],lowercase_ : List[str]=False )-> Optional[Any]:
'''simple docstring'''
A__ = super()._prepare_for_class(lowercase_,lowercase_,return_labels=lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
A__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length),dtype=torch.long,device=lowercase_ )
A__ = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=lowercase_ )
return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check if log should be performed"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
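

# A minimal usage sketch (not part of the original module; assumes the script is
# launched under an initialized `accelerate` state, and "my_app" is an illustrative
# logger name):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()  # initializes the shared PartialState
#   logger = get_logger("my_app", log_level="INFO")
#   logger.info("logged once, on the main process only")
#   logger.info("logged by every process, in rank order", main_process_only=False, in_order=True)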
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
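

# An illustrative wiring sketch (not part of the original module; assumes a running
# Ray cluster and a retriever checkpoint such as "facebook/rag-token-nq"):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()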
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
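

# A minimal usage sketch (not part of the original module; downloads the hosted
# vocabulary on first use):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   encoded = tokenizer("Hello world!")
#   print(encoded["input_ids"], encoded["token_type_ids"])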
from random import randint, random
def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
) -> list:
    """Build a highway (a list of lanes) and place cars on the first lane."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index) -> int:
    """Count the empty cells between the car at `car_index` and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed) -> list:
    """Compute the speed of every car for the next time step."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed) -> list:
    """Run the simulation for `number_of_update` steps, appending each new lane state."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
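
# A small self-contained demo (an illustrative sketch, not part of the original
# module): 30 cells, a car every 4 cells starting at speed 3, five update steps
# with a 10% chance of random braking and a max speed of 5.
if __name__ == "__main__":
    demo_lane = construct_highway(30, frequency=4, initial_speed=3)
    for step, state in enumerate(simulate(demo_lane, number_of_update=5, probability=0.1, max_speed=5)):
        print(f"t={step}: {state}")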
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
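

# A minimal usage sketch (not part of the original module; the checkpoint name is
# illustrative of any vision-to-text model the pipeline supports):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{"generated_text": ...}]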
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
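

# A minimal usage sketch (not part of the original module): the defaults above
# match ALBERT-xxlarge; passing smaller sizes yields a base-style layout.
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#   print(config.num_hidden_layers, config.num_hidden_groups, config.inner_group_num)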
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
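

# A minimal composition sketch (not part of the original module; `ViTConfig` and
# `BertConfig` are just one possible encoder/decoder pairing):
#
#   from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention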
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    # Split the dataset bunch into features and target.
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    # California house price dataset, fetched via scikit-learn
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the luminosity weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (the offsets assume an odd-sized, centered kernel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Translate the original GLPN checkpoint keys into HuggingFace naming."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Try every possible Caesar key and print each candidate decryption."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
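
# Example (an illustrative sketch): a message Caesar-shifted by 3 is recovered on
# the key #3 line of the brute-force printout.
#
#   decrypt("KHOOR ZRUOG")  # the "Key #3" line reads "HELLO WORLD"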
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
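

# A minimal usage sketch (not part of the original module): custom column names
# are mapped back to the canonical "question"/"context"/"answers" schema.
#
#   template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
#   print(template.column_mapping)  # {'q': 'question', 'ctx': 'context', 'answers': 'answers'}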
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
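

# A minimal usage sketch (not part of the original module): `attribute_map` lets
# generic code read `hidden_size`/`num_hidden_layers` even though GPT-J stores
# them under GPT-2-style names.
#
#   config = GPTJConfig(n_embd=1024, n_layer=8, n_head=8)
#   print(config.hidden_size, config.num_hidden_layers)  # 1024 8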
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ : int = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['LayoutLMv3FeatureExtractor']
a__ : str = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : List[str] ):
__lowercase = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
__lowercase = AutoTokenizer.from_pretrained("google/mt5-small" )
__lowercase = tokenizer("Hello there", return_tensors="pt" ).input_ids
__lowercase = tokenizer("Hi I am", return_tensors="pt" ).input_ids
__lowercase = model(input_ids.to(UpperCAmelCase__ ), labels=labels.to(UpperCAmelCase__ ) ).loss
__lowercase = -(labels.shape[-1] * loss.item())
__lowercase = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 17 |
'''simple docstring'''
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = len(__A )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
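    # subset[i][j] records whether some subset of the first i values of arr can sum to j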
    # base case: a sum of zero (0) can always be formed by not taking any element,
    # hence True/1 for every prefix of arr
for i in range(arr_len + 1 ):
__UpperCamelCase = True
    # a non-zero sum can never be formed from the empty set, hence False
for i in range(1 ,required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
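            # transition: either skip arr[i - 1], or (when it fits) take it and reuse the result for j - arr[i - 1]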
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__lowerCamelCase : Dict = parser.parse_args()
if args.model_type == "bert":
__lowerCamelCase : Any = BertForMaskedLM.from_pretrained(args.model_name)
__lowerCamelCase : Any = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__lowerCamelCase : Dict = model.state_dict()
__lowerCamelCase : str = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowerCamelCase : int = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowerCamelCase : Any = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowerCamelCase : Tuple = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
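    # transfer a fixed subset of teacher layers (0, 2, 4, 7, 9, 11) into consecutive student layers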
for w in ["weight", "bias"]:
__lowerCamelCase : Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowerCamelCase : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowerCamelCase : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowerCamelCase : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowerCamelCase : Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowerCamelCase : List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowerCamelCase : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowerCamelCase : Optional[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowerCamelCase : Optional[Any] = state_dict['''cls.predictions.decoder.weight''']
__lowerCamelCase : Optional[int] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCamelCase : str = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowerCamelCase : Optional[Any] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 18 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a__ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , lowercase = None ) -> List[str]:
__UpperCamelCase = (
os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__UpperCamelCase = Extractor
def __lowerCamelCase ( self , lowercase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__UpperCamelCase = os.path.abspath(lowercase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> bool:
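        # extract when forced, or when no file or non-empty directory already exists at the output path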
return force_extract or (
not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase ))
)
def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str:
__UpperCamelCase = self.extractor.infer_extractor_format(lowercase )
if not extractor_format:
return input_path
__UpperCamelCase = self._get_output_path(lowercase )
if self._do_extract(lowercase , lowercase ):
self.extractor.extract(lowercase , lowercase , lowercase )
return output_path
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
...
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = []
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> int:
with open(lowercase , """rb""" ) as f:
return f.read(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
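        # when no magic number is supplied, read just enough leading bytes to cover the longest known signature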
if not magic_number:
__UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers )
try:
__UpperCamelCase = cls.read_magic_number(lowercase , lowercase )
except OSError:
return False
return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
return tarfile.is_tarfile(lowercase )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
def resolved(lowercase ) -> str:
return os.path.realpath(os.path.abspath(lowercase ) )
def badpath(lowercase , lowercase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase )
def badlink(lowercase , lowercase ) -> bool:
# Links are interpreted relative to the directory containing the link
__UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowercase )
__UpperCamelCase = resolved(lowercase )
for finfo in members:
if badpath(finfo.name , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = tarfile.open(lowercase )
tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x1F\x8B''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with gzip.open(lowercase , """rb""" ) as gzip_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if super().is_extractable(lowercase , magic_number=lowercase ):
return True
try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase , """rb""" ) as fp:
__UpperCamelCase = _EndRecData(lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be
if len(lowercase ) == sizeCentralDir:
__UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
with zipfile.ZipFile(lowercase , """r""" ) as zip_file:
zip_file.extractall(lowercase )
zip_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with lzma.open(lowercase ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = rarfile.RarFile(lowercase )
rf.extractall(lowercase )
rf.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__UpperCamelCase = zstd.ZstdDecompressor()
with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh:
dctx.copy_stream(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with bza.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(lowercase , exist_ok=lowercase )
with pyazr.SevenZipFile(lowercase , """r""" ) as archive:
archive.extractall(lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ :
    # Put the zip extractor last, because other archive formats (e.g. tar or gzip) can be wrongly detected as zip
__SCREAMING_SNAKE_CASE = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls ) -> Union[str, Any]:
return max(
len(lowercase )
for extractor in cls.extractors.values()
if issubclass(lowercase , lowercase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
try:
return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = cls.infer_extractor_format(lowercase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/>
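        # read the file's leading bytes once, then test them against each registered extractor's magic numbers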
__UpperCamelCase = cls._get_magic_number_max_length()
__UpperCamelCase = cls._read_magic_number(lowercase , lowercase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowercase , magic_number=lowercase ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase )
# Prevent parallel extractions
__UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) )
with FileLock(lowercase ):
shutil.rmtree(lowercase , ignore_errors=lowercase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format
else:
__UpperCamelCase = cls.extractors[extractor_format]
return extractor.extract(lowercase , lowercase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowercase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowercase ):
return extractor.extract(lowercase , lowercase )
| 349 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
__A =[
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
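    # walk the dotted key (e.g. "encoder.layers.0.attention") attribute by attribute down to the target submodule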
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase_ = "lm_head"
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = "weight"
else:
lowerCamelCase_ = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = full_name.split("conv_layers." )[-1]
lowerCamelCase_ = name.split("." )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
if config_path is not None:
lowerCamelCase_ = UniSpeechConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase_ = Dictionary.load_from_json(lowerCamelCase__ )
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase_ = target_dict.pad_index
lowerCamelCase_ = target_dict.bos_index
lowerCamelCase_ = target_dict.eos_index
lowerCamelCase_ = len(target_dict.symbols )
lowerCamelCase_ = os.path.join(lowerCamelCase__ , "vocab.json" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowerCamelCase_ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase_ = 4_2
lowerCamelCase_ = 4_3
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase__ , )
lowerCamelCase_ = True if config.feat_extract_norm == "layer" else False
lowerCamelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
lowerCamelCase_ = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
lowerCamelCase_ = UniSpeechForCTC(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechForPreTraining(lowerCamelCase__ )
if is_finetuned:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
hf_unispeech.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__A =parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 19 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU.
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
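        # decode with an initialized cache in two steps and check the result matches a single full decode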
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __snake_case :
_a : Optional[str]= field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
_a : Optional[str]= field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_a : Optional[str]= field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
_a : Optional[str]= field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_a : Optional[int]= field(default=2 , metadata={"help": "Batch size for training."} )
_a : Optional[int]= field(default=2 , metadata={"help": "Batch size for evaluation."} )
_a : Optional[float]= field(default=0.1 , metadata={"help": "Value of weight decay."} )
_a : Optional[int]= field(
default=1_0000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    _a : Optional[float]= field(default=2E-4 , metadata={"help": "Learning rate for training."} )
_a : Optional[str]= field(default="cosine" , metadata={"help": "Learning rate."} )
_a : Optional[int]= field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_a : Optional[int]= field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
_a : Optional[bool]= field(
default=lowerCAmelCase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_a : Optional[int]= field(default=5_0000 , metadata={"help": "Maximum number of training steps."} )
_a : Optional[int]= field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a : Optional[int]= field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
_a : Optional[int]= field(default=1 , metadata={"help": "Training seed."} )
_a : Optional[int]= field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
_a : Optional[str]= field(
default=lowerCAmelCase , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_a : Optional[bool]= field(default=lowerCAmelCase , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __snake_case :
_a : Optional[str]= field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_a : Optional[str]= field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_a : Optional[int]= field(default=2 , metadata={"help": "Batch size used for evaluation."} )
_a : Optional[int]= field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a : Optional[int]= field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
_a : Optional[int]= field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __snake_case :
_a : Optional[str]= field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_a : Optional[int]= field(default=lowerCAmelCase , metadata={"help": "Number of workers used for code evaluation."} )
_a : Optional[int]= field(
default=lowerCAmelCase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
_a : Optional[bool]= field(
default=lowerCAmelCase , metadata={"help": "Sample from the language model's output distribution."} )
_a : Optional[float]= field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
_a : Optional[int]= field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
_a : Optional[int]= field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
_a : Optional[float]= field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
_a : Optional[int]= field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
_a : Optional[int]= field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
_a : Optional[int]= field(default=1 , metadata={"help": "Random seed used for evaluation."} )
_a : Optional[str]= field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
_a : Optional[str]= field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_a : Optional[int]= field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __snake_case :
_a : Optional[int]= field(
default=lowerCAmelCase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
_a : Optional[str]= field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
_a : Optional[str]= field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
_a : Optional[int]= field(
default=10_0000 , metadata={"help": "Number of files to save per JSON output file."} )
_a : Optional[str]= field(default="content" , metadata={"help": "Column containing text data to process."} )
_a : Optional[float]= field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_a : Optional[float]= field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_a : Optional[float]= field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_a : Optional[float]= field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_a : Optional[float]= field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
_a : Optional[str]= field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
_a : Optional[bool]= field(
default=lowerCAmelCase , metadata={"help": "If True, near-duplicate samples are removed."} )
_a : Optional[float]= field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __snake_case :
_a : Optional[str]= field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
_a : Optional[str]= field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
_a : Optional[str]= field(default="content" , metadata={"help": "Column containing text data to process."} )
_a : Optional[int]= field(default=20_0000 , metadata={"help": "Number of examples to train tokenizer on."} )
_a : Optional[int]= field(
        default=3_2768 , metadata={"help": "Vocabulary size of the new tokenizer."} )
_a : Optional[str]= field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
_a : Optional[bool]= field(default=lowerCAmelCase , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __snake_case :
_a : Optional[str]= field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
_a : Optional[str]= field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
_a : Optional[str]= field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
_a : Optional[int]= field(default=lowerCAmelCase , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __snake_case :
_a : Optional[str]= field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
_a : Optional[str]= field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
_a : Optional[str]= field(default="codeparrot" , metadata={"help": "Name of the created model."} )
_a : Optional[bool]= field(default=lowerCAmelCase , metadata={"help": "Push saved tokenizer to the hub."} )
| 20 |
'''simple docstring'''
import pytest
a__ : List[str] = '__dummy_dataset1__'
a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
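    # write the dummy loading script to tmp_path/datasets/<script_name>/<script_name>.py and return its path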
__UpperCamelCase = dataset_loading_script_name
__UpperCamelCase = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=__A )
__UpperCamelCase = script_dir / f"{script_name}.py"
with open(__A ,"""w""" ) as f:
f.write(__A )
return str(__A )
| 349 | 0 |